git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 26 Jul 2016 04:35:03 +0000 (21:35 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 26 Jul 2016 04:35:03 +0000 (21:35 -0700)
Pull irq updates from Thomas Gleixner:
 "The irq department delivers:

   - new core infrastructure to allow better management of multi-queue
     devices (interrupt spreading, node aware descriptor allocation ...)

   - a new interrupt flow handler to support the newfangled Intel VMD
     devices.

   - yet another new interrupt controller driver.

   - a series of fixes which addresses sparse warnings, missing
     includes, missing static declarations etc from Ben Dooks.

   - a fix for the error handling in the hierarchical domain allocation
     code.

   - the usual pile of small updates to core and driver code"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (46 commits)
  genirq: Fix missing irq allocation affinity hint
  irqdomain: Fix irq_domain_alloc_irqs_recursive() error handling
  irq/Documentation: Correct result of echnoing 5 to smp_affinity
  MAINTAINERS: Remove Jiang Liu from irq domains
  genirq/msi: Fix broken debug output
  genirq: Add a helper to spread an affinity mask for MSI/MSI-X vectors
  genirq/msi: Make use of affinity aware allocations
  genirq: Use affinity hint in irqdesc allocation
  genirq: Add affinity hint to irq allocation
  genirq: Introduce IRQD_AFFINITY_MANAGED flag
  genirq/msi: Remove unused MSI_FLAG_IDENTITY_MAP
  irqchip/s3c24xx: Fixup IO accessors for big endian
  irqchip/exynos-combiner: Fix usage of __raw IO
  irqdomain: Fix disposal of mappings for interrupt hierarchies
  irqchip/aspeed-vic: Add irq controller for Aspeed
  doc/devicetree: Add Aspeed VIC bindings
  x86/PCI/VMD: Use untracked irq handler
  genirq: Add untracked irq handler
  irqchip/mips-gic: Populate irq_domain names
  irqchip/gicv3-its: Implement two-level(indirect) device table support
  ...

1855 files changed:
.mailmap
Documentation/ABI/testing/configfs-iio
Documentation/ABI/testing/sysfs-bus-iio
Documentation/ABI/testing/sysfs-bus-iio-health-afe440x
Documentation/DocBook/iio.tmpl
Documentation/RCU/Design/Requirements/Requirements.html
Documentation/RCU/stallwarn.txt
Documentation/RCU/whatisRCU.txt
Documentation/devicetree/bindings/extcon/extcon-arizona.txt
Documentation/devicetree/bindings/hwmon/apm-xgene-hwmon.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/jc42.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/trivial-devices.txt
Documentation/devicetree/bindings/iio/adc/brcm,iproc-static-adc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/iio/adc/max1363.txt [new file with mode: 0644]
Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/iio/dac/ad5755.txt [new file with mode: 0644]
Documentation/devicetree/bindings/iio/pressure/bmp085.txt
Documentation/devicetree/bindings/iio/st-sensors.txt
Documentation/devicetree/bindings/phy/phy-da8xx-usb.txt [new file with mode: 0644]
Documentation/devicetree/bindings/phy/rockchip-usb-phy.txt
Documentation/devicetree/bindings/serial/8250.txt
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt [new file with mode: 0644]
Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt [moved from Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt with 75% similarity]
Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
Documentation/devicetree/bindings/usb/usb-ohci.txt
Documentation/gdb-kernel-debugging.txt
Documentation/hwmon/abituguru
Documentation/hwmon/ftsteutates [new file with mode: 0644]
Documentation/hwmon/ina3221 [new file with mode: 0644]
Documentation/hwmon/jc42
Documentation/hwmon/max1668
Documentation/hwmon/sht3x [new file with mode: 0644]
Documentation/hwmon/submitting-patches
Documentation/hwmon/tmp401
Documentation/kbuild/makefiles.txt
Documentation/kernel-parameters.txt
Documentation/memory-barriers.txt
Documentation/sysctl/kernel.txt
Documentation/x86/intel_mpx.txt
Documentation/x86/tlb.txt
Documentation/x86/x86_64/machinecheck
Documentation/x86/x86_64/mm.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/atomic.h
arch/alpha/include/asm/rwsem.h
arch/alpha/include/asm/spinlock.h
arch/arc/include/asm/atomic.h
arch/arc/include/asm/spinlock.h
arch/arc/kernel/time.c
arch/arm/Kconfig
arch/arm/boot/dts/armada-385-linksys.dtsi
arch/arm/boot/dts/sama5d2.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-r8-chip.dts
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/tegra30-beaver.dts
arch/arm/include/asm/atomic.h
arch/arm/include/asm/efi.h
arch/arm/include/asm/spinlock.h
arch/arm/include/uapi/asm/kvm.h
arch/arm/kernel/smp_twd.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-integrator/Kconfig
arch/arm/mach-keystone/Kconfig
arch/arm/mach-moxart/Kconfig
arch/arm/mach-mvebu/Makefile
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-mxs/Kconfig
arch/arm/mach-nspire/Kconfig
arch/arm/mach-prima2/Kconfig
arch/arm/mach-u300/Kconfig
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/include/asm/atomic.h
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/cmpxchg.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/ptrace.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/entry.S
arch/arm64/mm/fault.c
arch/avr32/include/asm/atomic.h
arch/blackfin/include/asm/atomic.h
arch/blackfin/include/asm/spinlock.h
arch/blackfin/kernel/bfin_ksyms.c
arch/blackfin/mach-bf561/atomic.S
arch/frv/include/asm/atomic.h
arch/frv/include/asm/atomic_defs.h
arch/frv/include/asm/serial.h
arch/h8300/include/asm/atomic.h
arch/hexagon/include/asm/atomic.h
arch/hexagon/include/asm/spinlock.h
arch/ia64/include/asm/atomic.h
arch/ia64/include/asm/mutex.h
arch/ia64/include/asm/rwsem.h
arch/ia64/include/asm/spinlock.h
arch/m32r/boot/compressed/m32r_sio.c
arch/m32r/include/asm/atomic.h
arch/m32r/include/asm/spinlock.h
arch/m68k/coldfire/head.S
arch/m68k/coldfire/m5272.c
arch/m68k/coldfire/pci.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/ifpsp060/src/fpsp.S
arch/m68k/ifpsp060/src/pfpsp.S
arch/m68k/include/asm/atomic.h
arch/m68k/include/asm/dma.h
arch/m68k/include/asm/m525xsim.h
arch/m68k/include/asm/mcfmmu.h
arch/m68k/include/asm/q40_master.h
arch/m68k/mac/iop.c
arch/m68k/math-emu/fp_decode.h
arch/metag/include/asm/atomic_lnkget.h
arch/metag/include/asm/atomic_lock1.h
arch/metag/include/asm/spinlock.h
arch/microblaze/kernel/timer.c
arch/mips/include/asm/atomic.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/spinlock.h
arch/mips/ralink/cevt-rt3352.c
arch/mn10300/include/asm/atomic.h
arch/mn10300/include/asm/spinlock.h
arch/nios2/kernel/time.c
arch/parisc/include/asm/atomic.h
arch/parisc/include/asm/spinlock.h
arch/powerpc/include/asm/atomic.h
arch/powerpc/include/asm/mutex.h
arch/s390/include/asm/atomic.h
arch/s390/include/asm/rwsem.h
arch/s390/include/asm/spinlock.h
arch/sh/include/asm/atomic-grb.h
arch/sh/include/asm/atomic-irq.h
arch/sh/include/asm/atomic-llsc.h
arch/sh/include/asm/spinlock.h
arch/sparc/include/asm/atomic_32.h
arch/sparc/include/asm/atomic_64.h
arch/sparc/include/asm/spinlock_32.h
arch/sparc/include/asm/spinlock_64.h
arch/sparc/lib/atomic32.c
arch/sparc/lib/atomic_64.S
arch/sparc/lib/ksyms.c
arch/tile/include/asm/atomic.h
arch/tile/include/asm/atomic_32.h
arch/tile/include/asm/atomic_64.h
arch/tile/include/asm/barrier.h
arch/tile/include/asm/bitops_32.h
arch/tile/include/asm/futex.h
arch/tile/lib/atomic_32.c
arch/tile/lib/atomic_asm_32.S
arch/tile/lib/spinlock_32.c
arch/tile/lib/spinlock_64.c
arch/x86/Kconfig
arch/x86/boot/bitops.h
arch/x86/boot/boot.h
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/kaslr.c
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/misc.h
arch/x86/boot/compressed/pagetable.c
arch/x86/boot/cpu.c
arch/x86/boot/cpucheck.c
arch/x86/boot/cpuflags.c
arch/x86/boot/cpuflags.h
arch/x86/boot/string.c
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/syscalls/syscall_64.tbl
arch/x86/entry/thunk_64.S
arch/x86/entry/vdso/Makefile
arch/x86/entry/vdso/vdso32/sigreturn.S
arch/x86/entry/vdso/vdso32/system_call.S
arch/x86/entry/vdso/vma.c
arch/x86/entry/vsyscall/vsyscall_64.c
arch/x86/events/core.c
arch/x86/events/intel/Makefile
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/lbr.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_snb.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/msr.c
arch/x86/events/perf_event.h
arch/x86/include/asm/Kbuild
arch/x86/include/asm/apic.h
arch/x86/include/asm/apm.h
arch/x86/include/asm/arch_hweight.h
arch/x86/include/asm/archrandom.h
arch/x86/include/asm/asm.h
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_32.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/bios_ebda.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/checksum_32.h
arch/x86/include/asm/compat.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/fpu/types.h
arch/x86/include/asm/fpu/xstate.h
arch/x86/include/asm/intel-mid.h
arch/x86/include/asm/kaslr.h [new file with mode: 0644]
arch/x86/include/asm/kdebug.h
arch/x86/include/asm/local.h
arch/x86/include/asm/mutex_32.h
arch/x86/include/asm/mutex_64.h
arch/x86/include/asm/page_64_types.h
arch/x86/include/asm/percpu.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/pgtable_64_types.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/preempt.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/rmwcc.h
arch/x86/include/asm/rwsem.h
arch/x86/include/asm/signal.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/sync_bitops.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/topology.h
arch/x86/include/asm/trace/fpu.h [new file with mode: 0644]
arch/x86/include/asm/tsc.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/unistd.h
arch/x86/include/asm/x86_init.h
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/apic_flat_64.c
arch/x86/kernel/apic/apic_noop.c
arch/x86/kernel/apic/apic_numachip.c
arch/x86/kernel/apic/bigsmp_32.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/probe_32.c
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/apic/x2apic_phys.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/mcheck/mce-apei.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/rdrand.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/ebda.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/fpu/regset.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S
arch/x86/kernel/i386_ksyms_32.c
arch/x86/kernel/kvm.c
arch/x86/kernel/platform-quirks.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup.c
arch/x86/kernel/signal_compat.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/kernel/tsc_msr.c
arch/x86/kernel/vm86_32.c
arch/x86/kernel/x8664_ksyms_64.c
arch/x86/kernel/x86_init.c
arch/x86/kvm/x86.c
arch/x86/lguest/boot.c
arch/x86/lib/Makefile
arch/x86/lib/copy_user_64.S
arch/x86/lib/csum-wrappers_64.c
arch/x86/lib/getuser.S
arch/x86/lib/hweight.S [new file with mode: 0644]
arch/x86/lib/kaslr.c [new file with mode: 0644]
arch/x86/lib/putuser.S
arch/x86/lib/usercopy_64.c
arch/x86/mm/Makefile
arch/x86/mm/dump_pagetables.c
arch/x86/mm/extable.c
arch/x86/mm/fault.c
arch/x86/mm/init.c
arch/x86/mm/init_64.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/kaslr.c [new file with mode: 0644]
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/mm/pgtable_32.c
arch/x86/pci/acpi.c
arch/x86/pci/intel_mid_pci.c
arch/x86/platform/atom/punit_atom_debug.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_32.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/intel-mid/Makefile
arch/x86/platform/intel-mid/device_libs/Makefile
arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c [new file with mode: 0644]
arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c [new file with mode: 0644]
arch/x86/platform/intel-mid/device_libs/platform_spidev.c [new file with mode: 0644]
arch/x86/platform/intel-mid/intel-mid.c
arch/x86/platform/intel-mid/mrfld.c [moved from arch/x86/platform/intel-mid/mrfl.c with 97% similarity]
arch/x86/platform/intel-mid/pwr.c [new file with mode: 0644]
arch/x86/platform/intel-mid/sfi.c
arch/x86/platform/uv/bios_uv.c
arch/x86/power/hibernate_64.c
arch/x86/power/hibernate_asm_64.S
arch/x86/ras/mce_amd_inj.c
arch/x86/realmode/init.c
arch/x86/xen/apic.c
arch/x86/xen/enlighten.c
arch/xtensa/include/asm/atomic.h
arch/xtensa/include/asm/spinlock.h
arch/xtensa/platforms/xt2000/setup.c
block/genhd.c
block/ioprio.c
crypto/asymmetric_keys/mscode_parser.c
crypto/asymmetric_keys/pkcs7_verify.c
crypto/asymmetric_keys/restrict.c
crypto/rsa-pkcs1pad.c
drivers/Makefile
drivers/acpi/acpi_dbg.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/nsparse.c
drivers/acpi/ec.c
drivers/acpi/nfit.c
drivers/acpi/pci_link.c
drivers/acpi/utils.c
drivers/ata/libata-core.c
drivers/bcma/bcma_private.h
drivers/block/xen-blkfront.c
drivers/char/dsp56k.c
drivers/char/mem.c
drivers/clk/at91/clk-programmable.c
drivers/clk/sunxi/clk-sun4i-display.c
drivers/clk/sunxi/clk-sun4i-tcon-ch1.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/arm_global_timer.c
drivers/clocksource/armv7m_systick.c
drivers/clocksource/asm9260_timer.c
drivers/clocksource/bcm2835_timer.c
drivers/clocksource/bcm_kona_timer.c
drivers/clocksource/cadence_ttc_timer.c
drivers/clocksource/clksrc-dbx500-prcmu.c
drivers/clocksource/clksrc-probe.c
drivers/clocksource/clksrc_st_lpc.c
drivers/clocksource/clps711x-timer.c
drivers/clocksource/dw_apb_timer_of.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/fsl_ftm_timer.c
drivers/clocksource/h8300_timer16.c
drivers/clocksource/h8300_timer8.c
drivers/clocksource/h8300_tpu.c
drivers/clocksource/meson6_timer.c
drivers/clocksource/mips-gic-timer.c
drivers/clocksource/moxart_timer.c
drivers/clocksource/mps2-timer.c
drivers/clocksource/mtk_timer.c
drivers/clocksource/mxs_timer.c
drivers/clocksource/nomadik-mtu.c
drivers/clocksource/pxa_timer.c
drivers/clocksource/qcom-timer.c
drivers/clocksource/rockchip_timer.c
drivers/clocksource/samsung_pwm_timer.c
drivers/clocksource/sun4i_timer.c
drivers/clocksource/tango_xtal.c
drivers/clocksource/tegra20_timer.c
drivers/clocksource/time-armada-370-xp.c
drivers/clocksource/time-efm32.c
drivers/clocksource/time-lpc32xx.c
drivers/clocksource/time-orion.c
drivers/clocksource/time-pistachio.c
drivers/clocksource/timer-atlas7.c
drivers/clocksource/timer-atmel-pit.c
drivers/clocksource/timer-atmel-st.c
drivers/clocksource/timer-digicolor.c
drivers/clocksource/timer-imx-gpt.c
drivers/clocksource/timer-integrator-ap.c
drivers/clocksource/timer-keystone.c
drivers/clocksource/timer-nps.c
drivers/clocksource/timer-oxnas-rps.c [new file with mode: 0644]
drivers/clocksource/timer-prima2.c
drivers/clocksource/timer-sp804.c
drivers/clocksource/timer-stm32.c
drivers/clocksource/timer-sun5i.c
drivers/clocksource/timer-ti-32k.c
drivers/clocksource/timer-u300.c
drivers/clocksource/versatile.c
drivers/clocksource/vf_pit_timer.c
drivers/clocksource/vt8500_timer.c
drivers/clocksource/zevio-timer.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpuidle/cpuidle.c
drivers/crypto/qat/qat_common/Makefile
drivers/dma/hsu/hsu.c
drivers/dma/hsu/pci.c
drivers/edac/sb_edac.c
drivers/extcon/Makefile
drivers/extcon/devres.c [new file with mode: 0644]
drivers/extcon/extcon-adc-jack.c
drivers/extcon/extcon-usb-gpio.c
drivers/extcon/extcon.c
drivers/firmware/efi/efibc.c
drivers/firmware/efi/runtime-wrappers.c
drivers/gpio/Kconfig
drivers/gpio/gpio-sch.c
drivers/gpio/gpiolib-legacy.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
drivers/gpu/drm/amd/amdgpu/atombios_i2c.h
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
drivers/gpu/drm/sun4i/sun4i_crtc.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/ad7314.c
drivers/hwmon/ads7871.c
drivers/hwmon/adt7411.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/emc6w201.c
drivers/hwmon/ftsteutates.c [new file with mode: 0644]
drivers/hwmon/ina3221.c [new file with mode: 0644]
drivers/hwmon/jc42.c
drivers/hwmon/jz4740-hwmon.c
drivers/hwmon/lm75.c
drivers/hwmon/lm90.c
drivers/hwmon/sht3x.c [new file with mode: 0644]
drivers/hwmon/tmp102.c
drivers/hwmon/tmp401.c
drivers/hwtracing/intel_th/core.c
drivers/hwtracing/intel_th/gth.c
drivers/hwtracing/intel_th/intel_th.h
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/stm/core.c
drivers/i2c/busses/i2c-qup.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-boardinfo.c
drivers/i2c/muxes/i2c-mux-reg.c
drivers/idle/intel_idle.c
drivers/iio/Kconfig
drivers/iio/Makefile
drivers/iio/accel/Kconfig
drivers/iio/accel/Makefile
drivers/iio/accel/bma180.c
drivers/iio/accel/bma220_spi.c [new file with mode: 0644]
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/accel/kxcjk-1013.c
drivers/iio/accel/mma7455_core.c
drivers/iio/accel/mma7660.c [new file with mode: 0644]
drivers/iio/accel/mma8452.c
drivers/iio/accel/mma9551.c
drivers/iio/accel/mma9553.c
drivers/iio/accel/st_accel.h
drivers/iio/accel/st_accel_core.c
drivers/iio/accel/st_accel_i2c.c
drivers/iio/accel/st_accel_spi.c
drivers/iio/adc/Kconfig
drivers/iio/adc/Makefile
drivers/iio/adc/ad7266.c
drivers/iio/adc/ad7291.c
drivers/iio/adc/ad7298.c
drivers/iio/adc/ad7476.c
drivers/iio/adc/ad7791.c
drivers/iio/adc/ad7793.c
drivers/iio/adc/ad7887.c
drivers/iio/adc/ad7923.c
drivers/iio/adc/ad799x.c
drivers/iio/adc/bcm_iproc_adc.c [new file with mode: 0644]
drivers/iio/adc/cc10001_adc.c
drivers/iio/adc/hi8435.c
drivers/iio/adc/ina2xx-adc.c
drivers/iio/adc/max1027.c
drivers/iio/adc/max1363.c
drivers/iio/adc/mcp320x.c
drivers/iio/adc/mcp3422.c
drivers/iio/adc/mxs-lradc.c
drivers/iio/adc/nau7802.c
drivers/iio/adc/ti-adc081c.c
drivers/iio/adc/ti-adc0832.c
drivers/iio/adc/ti-adc128s052.c
drivers/iio/adc/ti-ads1015.c
drivers/iio/adc/ti-ads8688.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/adc/vf610_adc.c
drivers/iio/adc/xilinx-xadc-events.c
drivers/iio/buffer/industrialio-buffer-dma.c
drivers/iio/chemical/Kconfig
drivers/iio/chemical/atlas-ph-sensor.c
drivers/iio/common/st_sensors/st_sensors_buffer.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/common/st_sensors/st_sensors_i2c.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/dac/Kconfig
drivers/iio/dac/ad5421.c
drivers/iio/dac/ad5504.c
drivers/iio/dac/ad5755.c
drivers/iio/dac/stx104.c
drivers/iio/dummy/Kconfig
drivers/iio/dummy/iio_simple_dummy.c
drivers/iio/dummy/iio_simple_dummy_buffer.c
drivers/iio/dummy/iio_simple_dummy_events.c
drivers/iio/gyro/bmg160_core.c
drivers/iio/gyro/st_gyro_core.c
drivers/iio/health/afe4403.c
drivers/iio/health/afe4404.c
drivers/iio/health/afe440x.h
drivers/iio/humidity/am2315.c
drivers/iio/humidity/htu21.c
drivers/iio/iio_core.h
drivers/iio/imu/bmi160/bmi160_core.c
drivers/iio/imu/inv_mpu6050/Kconfig
drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c
drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
drivers/iio/industrialio-core.c
drivers/iio/industrialio-event.c
drivers/iio/industrialio-sw-device.c [new file with mode: 0644]
drivers/iio/industrialio-trigger.c
drivers/iio/light/acpi-als.c
drivers/iio/light/adjd_s311.c
drivers/iio/light/apds9300.c
drivers/iio/light/apds9960.c
drivers/iio/light/cm36651.c
drivers/iio/light/gp2ap020a00f.c
drivers/iio/light/isl29125.c
drivers/iio/light/jsa1212.c
drivers/iio/light/lm3533-als.c
drivers/iio/light/ltr501.c
drivers/iio/light/max44000.c
drivers/iio/light/opt3001.c
drivers/iio/light/stk3310.c
drivers/iio/light/tcs3414.c
drivers/iio/light/tcs3472.c
drivers/iio/light/tsl2563.c
drivers/iio/light/us5182d.c
drivers/iio/magnetometer/Kconfig
drivers/iio/magnetometer/ak8975.c
drivers/iio/magnetometer/bmc150_magn_i2c.c
drivers/iio/magnetometer/bmc150_magn_spi.c
drivers/iio/magnetometer/hmc5843_core.c
drivers/iio/magnetometer/mag3110.c
drivers/iio/magnetometer/st_magn_core.c
drivers/iio/potentiometer/Kconfig
drivers/iio/potentiometer/Makefile
drivers/iio/potentiometer/max5487.c [new file with mode: 0644]
drivers/iio/potentiometer/mcp4531.c
drivers/iio/potentiometer/tpl0102.c
drivers/iio/pressure/Kconfig
drivers/iio/pressure/Makefile
drivers/iio/pressure/bmp280-core.c [moved from drivers/iio/pressure/bmp280.c with 58% similarity]
drivers/iio/pressure/bmp280-i2c.c [new file with mode: 0644]
drivers/iio/pressure/bmp280-regmap.c [new file with mode: 0644]
drivers/iio/pressure/bmp280-spi.c [new file with mode: 0644]
drivers/iio/pressure/bmp280.h [new file with mode: 0644]
drivers/iio/pressure/hp206c.c
drivers/iio/pressure/mpl3115.c
drivers/iio/pressure/ms5611_core.c
drivers/iio/pressure/ms5637.c
drivers/iio/pressure/st_pressure.h
drivers/iio/pressure/st_pressure_core.c
drivers/iio/pressure/st_pressure_i2c.c
drivers/iio/pressure/st_pressure_spi.c
drivers/iio/proximity/as3935.c
drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
drivers/iio/proximity/sx9500.c
drivers/iio/temperature/tsys02d.c
drivers/iio/trigger/Kconfig
drivers/iio/trigger/Makefile
drivers/iio/trigger/iio-trig-loop.c [new file with mode: 0644]
drivers/infiniband/core/sysfs.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/ud.c
drivers/infiniband/hw/i40iw/i40iw_main.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/input/joystick/xpad.c
drivers/input/rmi4/rmi_bus.c
drivers/input/rmi4/rmi_f12.c
drivers/input/touchscreen/ts4800-ts.c
drivers/input/touchscreen/tsc2004.c
drivers/input/touchscreen/tsc2005.c
drivers/input/touchscreen/tsc200x-core.c
drivers/input/touchscreen/tsc200x-core.h
drivers/input/touchscreen/wacom_w8001.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/intel-iommu.c
drivers/irqchip/irq-mips-gic.c
drivers/media/i2c/adv7604.c
drivers/media/usb/airspy/airspy.c
drivers/media/v4l2-core/v4l2-ioctl.c
drivers/misc/Makefile
drivers/misc/lkdtm.c [deleted file]
drivers/misc/lkdtm.h [new file with mode: 0644]
drivers/misc/lkdtm_bugs.c [new file with mode: 0644]
drivers/misc/lkdtm_core.c [new file with mode: 0644]
drivers/misc/lkdtm_heap.c [new file with mode: 0644]
drivers/misc/lkdtm_perms.c [new file with mode: 0644]
drivers/misc/lkdtm_rodata.c [new file with mode: 0644]
drivers/misc/lkdtm_usercopy.c [new file with mode: 0644]
drivers/misc/mei/hbm.c
drivers/misc/mei/mei_dev.h
drivers/mmc/card/block.c
drivers/mmc/host/jz4740_mmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/sdhci-acpi.c
drivers/mtd/nand/omap2.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/ethernet/agere/et131x.c
drivers/net/ethernet/aurora/nb8800.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
drivers/net/ethernet/microchip/enc28j60.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/geneve.c
drivers/net/macsec.c
drivers/net/phy/dp83867.c
drivers/net/ppp/ppp_generic.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/nvme/host/core.c
drivers/nvmem/Kconfig
drivers/nvmem/imx-ocotp.c
drivers/nvmem/mtk-efuse.c
drivers/nvmem/mxs-ocotp.c
drivers/pci/Makefile
drivers/pci/pci-mid.c [new file with mode: 0644]
drivers/phy/Kconfig
drivers/phy/Makefile
drivers/phy/phy-core.c
drivers/phy/phy-da8xx-usb.c [new file with mode: 0644]
drivers/phy/phy-qcom-ufs-qmp-14nm.c
drivers/phy/phy-qcom-ufs-qmp-20nm.c
drivers/phy/phy-rcar-gen3-usb2.c
drivers/phy/phy-rockchip-usb.c
drivers/phy/phy-sun4i-usb.c
drivers/phy/phy-xgene.c
drivers/platform/chrome/cros_ec_dev.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/intel_telemetry_debugfs.c
drivers/platform/x86/intel_telemetry_pltdrv.c
drivers/pnp/isapnp/proc.c
drivers/power/Kconfig
drivers/power/axp288_charger.c
drivers/power/bq27xxx_battery.c
drivers/powercap/intel_rapl.c
drivers/pps/clients/pps_parport.c
drivers/regulator/qcom_smd-regulator.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/ipr.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/scsi_devinfo.c
drivers/sh/pm_runtime.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/android/Kconfig
drivers/staging/android/Makefile
drivers/staging/android/sw_sync.c
drivers/staging/android/sw_sync.h [deleted file]
drivers/staging/android/sync.c [deleted file]
drivers/staging/android/sync.h [deleted file]
drivers/staging/android/sync_debug.c
drivers/staging/android/sync_debug.h [new file with mode: 0644]
drivers/staging/android/trace/sync.h
drivers/staging/android/uapi/sw_sync.h [deleted file]
drivers/staging/comedi/comedi.h
drivers/staging/comedi/comedi_fops.c
drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c [deleted file]
drivers/staging/comedi/drivers/addi_apci_1564.c
drivers/staging/comedi/drivers/adl_pci9118.c
drivers/staging/comedi/drivers/cb_pcidas64.c
drivers/staging/comedi/drivers/comedi_bond.c
drivers/staging/comedi/drivers/daqboard2000.c
drivers/staging/comedi/drivers/das16.c
drivers/staging/comedi/drivers/das16m1.c
drivers/staging/comedi/drivers/das6402.c
drivers/staging/comedi/drivers/das800.c
drivers/staging/comedi/drivers/dmm32at.c
drivers/staging/comedi/drivers/dt2801.c
drivers/staging/comedi/drivers/dt2811.c
drivers/staging/comedi/drivers/dt2814.c
drivers/staging/comedi/drivers/dt2815.c
drivers/staging/comedi/drivers/dt2817.c
drivers/staging/comedi/drivers/gsc_hpdi.c
drivers/staging/comedi/drivers/jr3_pci.c
drivers/staging/comedi/drivers/me_daq.c
drivers/staging/comedi/drivers/mpc624.c
drivers/staging/comedi/drivers/ni_65xx.c
drivers/staging/comedi/drivers/ni_pcidio.c
drivers/staging/comedi/drivers/ni_pcimio.c
drivers/staging/comedi/drivers/pcmmio.c
drivers/staging/comedi/drivers/pcmuio.c
drivers/staging/comedi/drivers/plx9080.h
drivers/staging/comedi/drivers/quatech_daqp_cs.c
drivers/staging/comedi/drivers/rtd520.c
drivers/staging/comedi/drivers/s626.c
drivers/staging/comedi/drivers/s626.h
drivers/staging/comedi/drivers/serial2002.c
drivers/staging/fsl-mc/bus/dpbp.c
drivers/staging/fsl-mc/bus/dpmcp-cmd.h
drivers/staging/fsl-mc/bus/dpmcp.c
drivers/staging/fsl-mc/bus/dpmng-cmd.h
drivers/staging/fsl-mc/bus/dpmng.c
drivers/staging/fsl-mc/bus/dprc-cmd.h
drivers/staging/fsl-mc/bus/dprc-driver.c
drivers/staging/fsl-mc/bus/dprc.c
drivers/staging/fsl-mc/bus/mc-allocator.c
drivers/staging/fsl-mc/bus/mc-bus.c
drivers/staging/fsl-mc/bus/mc-msi.c
drivers/staging/fsl-mc/bus/mc-sys.c
drivers/staging/fsl-mc/include/dpbp-cmd.h
drivers/staging/fsl-mc/include/mc-cmd.h
drivers/staging/fsl-mc/include/mc.h
drivers/staging/iio/accel/Kconfig
drivers/staging/iio/accel/Makefile
drivers/staging/iio/accel/lis3l02dq.h [deleted file]
drivers/staging/iio/accel/lis3l02dq_core.c [deleted file]
drivers/staging/iio/accel/lis3l02dq_ring.c [deleted file]
drivers/staging/iio/accel/sca3000_core.c
drivers/staging/iio/adc/ad7280a.c
drivers/staging/iio/adc/ad7606_ring.c
drivers/staging/iio/adc/ad7816.c
drivers/staging/iio/addac/adt7316.c
drivers/staging/iio/cdc/ad7150.c
drivers/staging/iio/light/tsl2x7x_core.c
drivers/staging/ks7010/Kconfig [new file with mode: 0644]
drivers/staging/ks7010/Makefile [new file with mode: 0644]
drivers/staging/ks7010/TODO [new file with mode: 0644]
drivers/staging/ks7010/eap_packet.h [new file with mode: 0644]
drivers/staging/ks7010/ks7010_sdio.c [new file with mode: 0644]
drivers/staging/ks7010/ks7010_sdio.h [new file with mode: 0644]
drivers/staging/ks7010/ks_hostif.c [new file with mode: 0644]
drivers/staging/ks7010/ks_hostif.h [new file with mode: 0644]
drivers/staging/ks7010/ks_wlan.h [new file with mode: 0644]
drivers/staging/ks7010/ks_wlan_ioctl.h [new file with mode: 0644]
drivers/staging/ks7010/ks_wlan_net.c [new file with mode: 0644]
drivers/staging/ks7010/michael_mic.c [new file with mode: 0644]
drivers/staging/ks7010/michael_mic.h [new file with mode: 0644]
drivers/staging/lustre/include/linux/libcfs/curproc.h
drivers/staging/lustre/include/linux/libcfs/libcfs.h
drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
drivers/staging/lustre/include/linux/libcfs/libcfs_string.h
drivers/staging/lustre/include/linux/libcfs/libcfs_time.h
drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
drivers/staging/lustre/include/linux/lnet/lib-dlc.h
drivers/staging/lustre/include/linux/lnet/lib-types.h
drivers/staging/lustre/include/linux/lnet/types.h
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_modparams.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
drivers/staging/lustre/lnet/libcfs/debug.c
drivers/staging/lustre/lnet/libcfs/fail.c
drivers/staging/lustre/lnet/libcfs/hash.c
drivers/staging/lustre/lnet/libcfs/libcfs_string.c
drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c
drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c
drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c
drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c
drivers/staging/lustre/lnet/libcfs/module.c
drivers/staging/lustre/lnet/libcfs/prng.c
drivers/staging/lustre/lnet/libcfs/tracefile.c
drivers/staging/lustre/lnet/libcfs/tracefile.h
drivers/staging/lustre/lnet/libcfs/workitem.c
drivers/staging/lustre/lnet/lnet/acceptor.c
drivers/staging/lustre/lnet/lnet/api-ni.c
drivers/staging/lustre/lnet/lnet/config.c
drivers/staging/lustre/lnet/lnet/lib-eq.c
drivers/staging/lustre/lnet/lnet/lib-md.c
drivers/staging/lustre/lnet/lnet/lib-me.c
drivers/staging/lustre/lnet/lnet/lib-move.c
drivers/staging/lustre/lnet/lnet/lib-msg.c
drivers/staging/lustre/lnet/lnet/lo.c
drivers/staging/lustre/lnet/lnet/module.c
drivers/staging/lustre/lnet/lnet/net_fault.c
drivers/staging/lustre/lnet/lnet/nidstrings.c
drivers/staging/lustre/lnet/lnet/peer.c
drivers/staging/lustre/lnet/lnet/router.c
drivers/staging/lustre/lnet/selftest/brw_test.c
drivers/staging/lustre/lnet/selftest/conctl.c
drivers/staging/lustre/lnet/selftest/conrpc.c
drivers/staging/lustre/lnet/selftest/conrpc.h
drivers/staging/lustre/lnet/selftest/console.c
drivers/staging/lustre/lnet/selftest/console.h
drivers/staging/lustre/lnet/selftest/framework.c
drivers/staging/lustre/lnet/selftest/module.c
drivers/staging/lustre/lnet/selftest/ping_test.c
drivers/staging/lustre/lnet/selftest/rpc.c
drivers/staging/lustre/lnet/selftest/rpc.h
drivers/staging/lustre/lnet/selftest/selftest.h
drivers/staging/lustre/lnet/selftest/timer.c
drivers/staging/lustre/lnet/selftest/timer.h
drivers/staging/lustre/lustre/Kconfig
drivers/staging/lustre/lustre/fid/fid_internal.h
drivers/staging/lustre/lustre/fid/fid_lib.c
drivers/staging/lustre/lustre/fid/fid_request.c
drivers/staging/lustre/lustre/fid/lproc_fid.c
drivers/staging/lustre/lustre/fld/fld_cache.c
drivers/staging/lustre/lustre/fld/fld_internal.h
drivers/staging/lustre/lustre/fld/fld_request.c
drivers/staging/lustre/lustre/fld/lproc_fld.c
drivers/staging/lustre/lustre/include/cl_object.h
drivers/staging/lustre/lustre/include/interval_tree.h
drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
drivers/staging/lustre/lustre/include/linux/lustre_lite.h
drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
drivers/staging/lustre/lustre/include/linux/lustre_user.h
drivers/staging/lustre/lustre/include/lprocfs_status.h
drivers/staging/lustre/lustre/include/lu_object.h
drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h
drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
drivers/staging/lustre/lustre/include/lustre/lustre_user.h
drivers/staging/lustre/lustre/include/lustre_acl.h
drivers/staging/lustre/lustre/include/lustre_cfg.h
drivers/staging/lustre/lustre/include/lustre_debug.h
drivers/staging/lustre/lustre/include/lustre_disk.h
drivers/staging/lustre/lustre/include/lustre_dlm.h
drivers/staging/lustre/lustre/include/lustre_eacl.h
drivers/staging/lustre/lustre/include/lustre_export.h
drivers/staging/lustre/lustre/include/lustre_fid.h
drivers/staging/lustre/lustre/include/lustre_fld.h
drivers/staging/lustre/lustre/include/lustre_ha.h
drivers/staging/lustre/lustre/include/lustre_handles.h
drivers/staging/lustre/lustre/include/lustre_import.h
drivers/staging/lustre/lustre/include/lustre_intent.h
drivers/staging/lustre/lustre/include/lustre_lib.h
drivers/staging/lustre/lustre/include/lustre_lite.h
drivers/staging/lustre/lustre/include/lustre_log.h
drivers/staging/lustre/lustre/include/lustre_mdc.h
drivers/staging/lustre/lustre/include/lustre_mds.h
drivers/staging/lustre/lustre/include/lustre_net.h
drivers/staging/lustre/lustre/include/lustre_param.h
drivers/staging/lustre/lustre/include/lustre_req_layout.h
drivers/staging/lustre/lustre/include/lustre_sec.h
drivers/staging/lustre/lustre/include/obd.h
drivers/staging/lustre/lustre/include/obd_cksum.h
drivers/staging/lustre/lustre/include/obd_class.h
drivers/staging/lustre/lustre/include/obd_support.h
drivers/staging/lustre/lustre/ldlm/interval_tree.c
drivers/staging/lustre/lustre/ldlm/l_lock.c
drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c
drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
drivers/staging/lustre/lustre/ldlm/ldlm_plain.c
drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
drivers/staging/lustre/lustre/ldlm/ldlm_request.c
drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
drivers/staging/lustre/lustre/llite/Makefile
drivers/staging/lustre/lustre/llite/dcache.c
drivers/staging/lustre/lustre/llite/dir.c
drivers/staging/lustre/lustre/llite/file.c
drivers/staging/lustre/lustre/llite/glimpse.c
drivers/staging/lustre/lustre/llite/lcommon_cl.c
drivers/staging/lustre/lustre/llite/lcommon_misc.c
drivers/staging/lustre/lustre/llite/llite_close.c
drivers/staging/lustre/lustre/llite/llite_internal.h
drivers/staging/lustre/lustre/llite/llite_lib.c
drivers/staging/lustre/lustre/llite/llite_mmap.c
drivers/staging/lustre/lustre/llite/llite_nfs.c
drivers/staging/lustre/lustre/llite/llite_rmtacl.c [deleted file]
drivers/staging/lustre/lustre/llite/lloop.c [deleted file]
drivers/staging/lustre/lustre/llite/lproc_llite.c
drivers/staging/lustre/lustre/llite/namei.c
drivers/staging/lustre/lustre/llite/remote_perm.c [deleted file]
drivers/staging/lustre/lustre/llite/rw.c
drivers/staging/lustre/lustre/llite/rw26.c
drivers/staging/lustre/lustre/llite/statahead.c
drivers/staging/lustre/lustre/llite/super25.c
drivers/staging/lustre/lustre/llite/symlink.c
drivers/staging/lustre/lustre/llite/vvp_dev.c
drivers/staging/lustre/lustre/llite/vvp_internal.h
drivers/staging/lustre/lustre/llite/vvp_io.c
drivers/staging/lustre/lustre/llite/vvp_lock.c
drivers/staging/lustre/lustre/llite/vvp_object.c
drivers/staging/lustre/lustre/llite/vvp_page.c
drivers/staging/lustre/lustre/llite/vvp_req.c
drivers/staging/lustre/lustre/llite/xattr.c
drivers/staging/lustre/lustre/llite/xattr_cache.c
drivers/staging/lustre/lustre/lmv/lmv_fld.c
drivers/staging/lustre/lustre/lmv/lmv_intent.c
drivers/staging/lustre/lustre/lmv/lmv_internal.h
drivers/staging/lustre/lustre/lmv/lmv_obd.c
drivers/staging/lustre/lustre/lmv/lproc_lmv.c
drivers/staging/lustre/lustre/lov/lov_cl_internal.h
drivers/staging/lustre/lustre/lov/lov_dev.c
drivers/staging/lustre/lustre/lov/lov_ea.c
drivers/staging/lustre/lustre/lov/lov_internal.h
drivers/staging/lustre/lustre/lov/lov_io.c
drivers/staging/lustre/lustre/lov/lov_lock.c
drivers/staging/lustre/lustre/lov/lov_merge.c
drivers/staging/lustre/lustre/lov/lov_obd.c
drivers/staging/lustre/lustre/lov/lov_object.c
drivers/staging/lustre/lustre/lov/lov_offset.c
drivers/staging/lustre/lustre/lov/lov_pack.c
drivers/staging/lustre/lustre/lov/lov_page.c
drivers/staging/lustre/lustre/lov/lov_pool.c
drivers/staging/lustre/lustre/lov/lov_request.c
drivers/staging/lustre/lustre/lov/lovsub_dev.c
drivers/staging/lustre/lustre/lov/lovsub_io.c
drivers/staging/lustre/lustre/lov/lovsub_lock.c
drivers/staging/lustre/lustre/lov/lovsub_object.c
drivers/staging/lustre/lustre/lov/lovsub_page.c
drivers/staging/lustre/lustre/lov/lproc_lov.c
drivers/staging/lustre/lustre/mdc/lproc_mdc.c
drivers/staging/lustre/lustre/mdc/mdc_internal.h
drivers/staging/lustre/lustre/mdc/mdc_lib.c
drivers/staging/lustre/lustre/mdc/mdc_locks.c
drivers/staging/lustre/lustre/mdc/mdc_reint.c
drivers/staging/lustre/lustre/mdc/mdc_request.c
drivers/staging/lustre/lustre/mgc/lproc_mgc.c
drivers/staging/lustre/lustre/mgc/mgc_internal.h
drivers/staging/lustre/lustre/mgc/mgc_request.c
drivers/staging/lustre/lustre/obdclass/Makefile
drivers/staging/lustre/lustre/obdclass/acl.c [deleted file]
drivers/staging/lustre/lustre/obdclass/cl_internal.h
drivers/staging/lustre/lustre/obdclass/cl_io.c
drivers/staging/lustre/lustre/obdclass/cl_lock.c
drivers/staging/lustre/lustre/obdclass/cl_object.c
drivers/staging/lustre/lustre/obdclass/cl_page.c
drivers/staging/lustre/lustre/obdclass/class_obd.c
drivers/staging/lustre/lustre/obdclass/debug.c
drivers/staging/lustre/lustre/obdclass/genops.c
drivers/staging/lustre/lustre/obdclass/kernelcomm.c
drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
drivers/staging/lustre/lustre/obdclass/llog.c
drivers/staging/lustre/lustre/obdclass/llog_cat.c
drivers/staging/lustre/lustre/obdclass/llog_internal.h
drivers/staging/lustre/lustre/obdclass/llog_obd.c
drivers/staging/lustre/lustre/obdclass/llog_swab.c
drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
drivers/staging/lustre/lustre/obdclass/lu_object.c
drivers/staging/lustre/lustre/obdclass/lu_ref.c
drivers/staging/lustre/lustre/obdclass/lustre_handles.c
drivers/staging/lustre/lustre/obdclass/lustre_peer.c
drivers/staging/lustre/lustre/obdclass/obd_config.c
drivers/staging/lustre/lustre/obdclass/obd_mount.c
drivers/staging/lustre/lustre/obdclass/obdo.c
drivers/staging/lustre/lustre/obdclass/statfs_pack.c
drivers/staging/lustre/lustre/obdclass/uuid.c
drivers/staging/lustre/lustre/obdecho/echo_client.c
drivers/staging/lustre/lustre/osc/lproc_osc.c
drivers/staging/lustre/lustre/osc/osc_cache.c
drivers/staging/lustre/lustre/osc/osc_cl_internal.h
drivers/staging/lustre/lustre/osc/osc_dev.c
drivers/staging/lustre/lustre/osc/osc_internal.h
drivers/staging/lustre/lustre/osc/osc_io.c
drivers/staging/lustre/lustre/osc/osc_lock.c
drivers/staging/lustre/lustre/osc/osc_object.c
drivers/staging/lustre/lustre/osc/osc_page.c
drivers/staging/lustre/lustre/osc/osc_request.c
drivers/staging/lustre/lustre/ptlrpc/client.c
drivers/staging/lustre/lustre/ptlrpc/connection.c
drivers/staging/lustre/lustre/ptlrpc/events.c
drivers/staging/lustre/lustre/ptlrpc/import.c
drivers/staging/lustre/lustre/ptlrpc/layout.c
drivers/staging/lustre/lustre/ptlrpc/llog_client.c
drivers/staging/lustre/lustre/ptlrpc/llog_net.c
drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
drivers/staging/lustre/lustre/ptlrpc/niobuf.c
drivers/staging/lustre/lustre/ptlrpc/nrs.c
drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
drivers/staging/lustre/lustre/ptlrpc/pers.c
drivers/staging/lustre/lustre/ptlrpc/pinger.c
drivers/staging/lustre/lustre/ptlrpc/ptlrpc_internal.h
drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
drivers/staging/lustre/lustre/ptlrpc/recover.c
drivers/staging/lustre/lustre/ptlrpc/sec.c
drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
drivers/staging/lustre/lustre/ptlrpc/sec_config.c
drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c
drivers/staging/lustre/lustre/ptlrpc/sec_null.c
drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
drivers/staging/lustre/lustre/ptlrpc/service.c
drivers/staging/lustre/lustre/ptlrpc/wiretest.c
drivers/staging/lustre/sysfs-fs-lustre
drivers/staging/rtl8192u/ieee80211/ieee80211.h
drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
drivers/staging/rtl8192u/ieee80211/ieee80211_wx.c
drivers/staging/rtl8192u/r8180_93cx6.c
drivers/staging/rtl8192u/r8180_93cx6.h
drivers/staging/rtl8192u/r8192U.h
drivers/staging/rtl8192u/r8192U_core.c
drivers/staging/rtl8192u/r8192U_wx.c
drivers/staging/unisys/visorbus/iovmcall_gnuc.h
drivers/staging/unisys/visorbus/visorbus_main.c
drivers/staging/unisys/visorbus/visorchipset.c
drivers/staging/unisys/visorhba/visorhba_main.c
drivers/staging/unisys/visorinput/visorinput.c
drivers/staging/unisys/visornic/visornic_main.c
drivers/staging/wilc1000/Makefile
drivers/staging/wilc1000/TODO
drivers/staging/wilc1000/host_interface.c
drivers/staging/wilc1000/host_interface.h
drivers/staging/wilc1000/linux_wlan.c
drivers/staging/wilc1000/wilc_msgqueue.c [deleted file]
drivers/staging/wilc1000/wilc_msgqueue.h [deleted file]
drivers/staging/wilc1000/wilc_sdio.c
drivers/staging/wilc1000/wilc_spi.c
drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
drivers/staging/wilc1000/wilc_wfi_netdevice.h
drivers/staging/wilc1000/wilc_wlan.c
drivers/thermal/intel_soc_dts_thermal.c
drivers/tty/cyclades.c
drivers/tty/ipwireless/tty.c
drivers/tty/metag_da.c
drivers/tty/mips_ejtag_fdc.c
drivers/tty/mxser.c
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_dma.c
drivers/tty/serial/8250/8250_early.c
drivers/tty/serial/8250/8250_fintek.c
drivers/tty/serial/8250/8250_ingenic.c
drivers/tty/serial/8250/8250_mid.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/8250/8250_uniphier.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/Kconfig
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/m32r_sio.c
drivers/tty/serial/max310x.c
drivers/tty/serial/mps2-uart.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/msm_serial.h [deleted file]
drivers/tty/serial/mvebu-uart.c
drivers/tty/serial/pic32_uart.c
drivers/tty/serial/pmac_zilog.c
drivers/tty/serial/pxa.c
drivers/tty/serial/samsung.c
drivers/tty/serial/samsung.h
drivers/tty/serial/serial-tegra.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/serial_mctrl_gpio.c
drivers/tty/serial/serial_mctrl_gpio.h
drivers/tty/serial/sh-sci.c
drivers/tty/serial/sh-sci.h
drivers/tty/serial/sirfsoc_uart.h
drivers/tty/serial/vt8500_serial.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/vt/consolemap.c
drivers/tty/vt/keyboard.c
drivers/tty/vt/vt.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/chipidea/Kconfig
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-wdm.c
drivers/usb/common/common.c
drivers/usb/core/message.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/Kconfig
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd_queue.c
drivers/usb/dwc2/hw.h
drivers/usb/dwc3/core.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/debug.h
drivers/usb/dwc3/debugfs.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/gadget.h
drivers/usb/dwc3/host.c
drivers/usb/dwc3/io.h
drivers/usb/dwc3/platform_data.h [deleted file]
drivers/usb/dwc3/trace.h
drivers/usb/early/ehci-dbgp.c
drivers/usb/gadget/Kconfig
drivers/usb/gadget/config.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_mass_storage.c
drivers/usb/gadget/function/u_serial.c
drivers/usb/gadget/legacy/g_ffs.c
drivers/usb/gadget/udc/Kconfig
drivers/usb/gadget/udc/Makefile
drivers/usb/gadget/udc/amd5536udc.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/bdc/bdc_cmd.c
drivers/usb/gadget/udc/bdc/bdc_ep.c
drivers/usb/gadget/udc/core.c [new file with mode: 0644]
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/m66592-udc.c
drivers/usb/gadget/udc/mv_u3d_core.c
drivers/usb/gadget/udc/mv_udc_core.c
drivers/usb/gadget/udc/net2272.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/gadget/udc/net2280.h
drivers/usb/gadget/udc/pch_udc.c
drivers/usb/gadget/udc/pxa27x_udc.c
drivers/usb/gadget/udc/r8a66597-udc.c
drivers/usb/gadget/udc/trace.c [new file with mode: 0644]
drivers/usb/gadget/udc/trace.h [new file with mode: 0644]
drivers/usb/gadget/udc/udc-core.c [deleted file]
drivers/usb/gadget/udc/udc-xilinx.c
drivers/usb/host/Kconfig
drivers/usb/host/ehci-platform.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-platform.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/image/microtek.h
drivers/usb/misc/sisusbvga/sisusb.c
drivers/usb/misc/sisusbvga/sisusb_con.c
drivers/usb/misc/sisusbvga/sisusb_init.h
drivers/usb/misc/usb3503.c
drivers/usb/musb/Makefile
drivers/usb/musb/cppi_dma.c
drivers/usb/musb/cppi_dma.h
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_cppi41.c
drivers/usb/musb/musb_debug.h
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_gadget_ep0.c
drivers/usb/musb/musb_host.c
drivers/usb/musb/musb_trace.c [new file with mode: 0644]
drivers/usb/musb/musb_trace.h [new file with mode: 0644]
drivers/usb/musb/musb_virthub.c
drivers/usb/musb/musbhsdma.c
drivers/usb/musb/sunxi.c
drivers/usb/phy/Kconfig
drivers/usb/phy/phy-am335x.c
drivers/usb/phy/phy-msm-usb.c
drivers/usb/phy/phy-omap-otg.c
drivers/usb/renesas_usbhs/common.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/rcar3.c
drivers/usb/usbip/usbip_common.h
drivers/usb/usbip/vudc_sysfs.c
drivers/video/console/dummycon.c
drivers/video/console/fbcon.c
drivers/video/console/mdacon.c
drivers/video/console/newport_con.c
drivers/video/console/sticon.c
drivers/video/console/vgacon.c
drivers/xen/xen-acpi-processor.c
drivers/xen/xenbus/xenbus_dev_frontend.c
drivers/xen/xenbus/xenbus_xs.c
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/buffer.c
fs/ceph/file.c
fs/char_dev.c
fs/cifs/dir.c
fs/configfs/file.c
fs/ecryptfs/crypto.c
fs/ecryptfs/file.c
fs/ecryptfs/kthread.c
fs/ecryptfs/main.c
fs/fs-writeback.c
fs/fuse/dir.c
fs/gfs2/aops.c
fs/gfs2/dentry.c
fs/gfs2/dir.c
fs/gfs2/export.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glock.h
fs/gfs2/inode.c
fs/gfs2/inode.h
fs/gfs2/lops.c
fs/gfs2/main.c
fs/gfs2/ops_fstype.c
fs/gfs2/quota.c
fs/gfs2/recovery.c
fs/gfs2/recovery.h
fs/gfs2/rgrp.c
fs/gfs2/super.c
fs/nfs/dir.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/read_write.c
fs/timerfd.c
fs/xfs/xfs_ioctl.c
include/acpi/acpi_drivers.h
include/acpi/acpixf.h
include/asm-generic/atomic-long.h
include/asm-generic/atomic.h
include/asm-generic/atomic64.h
include/asm-generic/barrier.h
include/asm-generic/cputime_nsecs.h
include/asm-generic/mutex-dec.h
include/asm-generic/mutex-xchg.h
include/asm-generic/qspinlock.h
include/asm-generic/rwsem.h
include/asm-generic/vmlinux.lds.h
include/clocksource/timer-sp804.h
include/drm/ttm/ttm_bo_api.h
include/linux/alarmtimer.h
include/linux/atomic.h
include/linux/bcma/bcma.h
include/linux/buffer_head.h
include/linux/clk.h
include/linux/clocksource.h
include/linux/compiler.h
include/linux/console.h
include/linux/console_struct.h
include/linux/context_tracking.h
include/linux/dma/hsu.h
include/linux/efi.h
include/linux/extcon.h
include/linux/extcon/extcon-adc-jack.h
include/linux/fence.h
include/linux/filter.h
include/linux/huge_mm.h
include/linux/iio/common/st_sensors.h
include/linux/iio/iio.h
include/linux/iio/sw_device.h [new file with mode: 0644]
include/linux/kernel.h
include/linux/list.h
include/linux/memcontrol.h
include/linux/mlx5/driver.h
include/linux/mm_types.h
include/linux/mod_devicetable.h
include/linux/netdevice.h
include/linux/nvmem-consumer.h
include/linux/of.h
include/linux/percpu-refcount.h
include/linux/perf_event.h
include/linux/phy/phy.h
include/linux/platform_data/sht3x.h [new file with mode: 0644]
include/linux/posix_acl.h
include/linux/printk.h
include/linux/radix-tree.h
include/linux/random.h
include/linux/rcupdate.h
include/linux/rmap.h
include/linux/rwsem.h
include/linux/sched.h
include/linux/serial_8250.h
include/linux/serial_core.h
include/linux/sfi.h
include/linux/skbuff.h
include/linux/spinlock_up.h
include/linux/time.h
include/linux/timer.h
include/linux/torture.h
include/linux/usb/gadget.h
include/linux/usb/msm_hsusb.h [deleted file]
include/linux/usb/of.h
include/linux/usb/xhci_pdriver.h [deleted file]
include/linux/vt_kern.h
include/linux/vtime.h
include/net/bonding.h
include/net/ip.h
include/net/netfilter/nf_conntrack.h
include/net/sock.h
include/net/switchdev.h
include/uapi/linux/Kbuild
include/uapi/linux/iio/types.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/perf_event.h
init/Kconfig
ipc/sem.c
kernel/bpf/stackmap.c
kernel/cpu.c
kernel/events/callchain.c
kernel/events/core.c
kernel/exit.c
kernel/gcov/gcc_4_7.c
kernel/jump_label.c
kernel/locking/lockdep.c
kernel/locking/mutex-debug.h
kernel/locking/mutex.h
kernel/locking/qrwlock.c
kernel/locking/qspinlock.c
kernel/locking/qspinlock_paravirt.h
kernel/locking/rtmutex.c
kernel/locking/rwsem-xadd.c
kernel/locking/rwsem.c
kernel/locking/rwsem.h
kernel/power/hibernate.c
kernel/rcu/rcuperf.c
kernel/rcu/rcutorture.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_exp.h [new file with mode: 0644]
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/sched/core.c
kernel/sched/cpuacct.c
kernel/sched/cputime.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/loadavg.c
kernel/sched/sched.h
kernel/signal.c
kernel/smp.c
kernel/sysctl.c
kernel/task_work.c
kernel/time/alarmtimer.c
kernel/time/clockevents.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/posix-cpu-timers.c
kernel/time/test_udelay.c
kernel/time/tick-broadcast-hrtimer.c
kernel/time/tick-internal.h
kernel/time/tick-sched.c
kernel/time/timeconv.c
kernel/time/timekeeping.c
kernel/time/timer.c
kernel/time/timer_stats.c
kernel/torture.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/atomic64.c
lib/atomic64_test.c
lib/bitmap.c
lib/hweight.c
lib/random32.c
mm/compaction.c
mm/huge_memory.c
mm/hugetlb.c
mm/kasan/quarantine.c
mm/memcontrol.c
mm/memory.c
mm/mmap.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/slab_common.c
mm/workingset.c
net/8021q/vlan_dev.c
net/8021q/vlan_netlink.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/originator.c
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/types.h
net/bridge/br_netfilter_hooks.c
net/ceph/osdmap.c
net/core/filter.c
net/core/flow_dissector.c
net/core/skbuff.c
net/core/sock.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/decnet/dn_fib.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_timewait_sock.c
net/ipv4/ip_output.c
net/ipv4/tcp_input.c
net/ipv4/udp.c
net/ipv6/ip6_fib.c
net/ipv6/udp.c
net/irda/ircomm/ircomm_tty_ioctl.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_ct.c
net/netfilter/nft_meta.c
net/packet/af_packet.c
net/rds/tcp.c
net/rose/rose_in.c
net/sched/act_mirred.c
net/sched/sch_htb.c
net/sctp/input.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/link.c
net/tipc/netlink_compat.c
net/tipc/node.c
net/wireless/nl80211.c
net/wireless/util.c
scripts/Makefile.asm-generic
scripts/gdb/linux/.gitignore
scripts/gdb/linux/Makefile
scripts/gdb/linux/constants.py.in
scripts/gdb/linux/radixtree.py [deleted file]
scripts/gdb/linux/symbols.py
scripts/gdb/vmlinux-gdb.py
scripts/mod/devicetable-offsets.c
scripts/mod/file2alias.c
security/apparmor/lsm.c
sound/core/control.c
sound/core/pcm.c
sound/core/timer.c
sound/pci/au88x0/au88x0_core.c
sound/pci/echoaudio/echoaudio.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/Kconfig
sound/soc/codecs/ak4613.c
sound/soc/codecs/cx20442.c
sound/soc/codecs/hdac_hdmi.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5670.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8940.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/davinci/davinci-mcasp.h
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/atom/sst-mfld-platform-compress.c
sound/soc/intel/skylake/bxt-sst.c
sound/soc/sh/rcar/adg.c
sound/usb/card.c
tools/arch/alpha/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/arm/include/uapi/asm/kvm.h [new file with mode: 0644]
tools/arch/arm/include/uapi/asm/perf_regs.h [new file with mode: 0644]
tools/arch/arm64/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/arm64/include/uapi/asm/kvm.h [new file with mode: 0644]
tools/arch/arm64/include/uapi/asm/perf_regs.h [new file with mode: 0644]
tools/arch/frv/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/h8300/include/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/hexagon/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/ia64/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/m32r/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/microblaze/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/mips/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/mips/include/uapi/asm/kvm.h [new file with mode: 0644]
tools/arch/mn10300/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/parisc/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/powerpc/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/powerpc/include/uapi/asm/kvm.h [new file with mode: 0644]
tools/arch/powerpc/include/uapi/asm/perf_regs.h [new file with mode: 0644]
tools/arch/s390/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/s390/include/uapi/asm/kvm.h [new file with mode: 0644]
tools/arch/s390/include/uapi/asm/kvm_perf.h [new file with mode: 0644]
tools/arch/s390/include/uapi/asm/sie.h [new file with mode: 0644]
tools/arch/score/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/sparc/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/tile/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/x86/include/asm/cpufeatures.h [new file with mode: 0644]
tools/arch/x86/include/asm/disabled-features.h [new file with mode: 0644]
tools/arch/x86/include/asm/required-features.h [new file with mode: 0644]
tools/arch/x86/include/asm/unistd_32.h [new file with mode: 0644]
tools/arch/x86/include/asm/unistd_64.h [new file with mode: 0644]
tools/arch/x86/include/uapi/asm/bitsperlong.h [new file with mode: 0644]
tools/arch/x86/include/uapi/asm/kvm.h [new file with mode: 0644]
tools/arch/x86/include/uapi/asm/kvm_perf.h [new file with mode: 0644]
tools/arch/x86/include/uapi/asm/perf_regs.h [new file with mode: 0644]
tools/arch/x86/include/uapi/asm/svm.h [new file with mode: 0644]
tools/arch/x86/include/uapi/asm/vmx.h [new file with mode: 0644]
tools/arch/x86/lib/memcpy_64.S [new file with mode: 0644]
tools/arch/x86/lib/memset_64.S [new file with mode: 0644]
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-libelf-gelf_getnote.c [new file with mode: 0644]
tools/build/feature/test-libelf-getshdrstrndx.c [new file with mode: 0644]
tools/build/feature/test-sdt.c [new file with mode: 0644]
tools/iio/Makefile
tools/iio/iio_generic_buffer.c [moved from tools/iio/generic_buffer.c with 76% similarity]
tools/include/asm-generic/bitops/__ffs.h
tools/include/asm-generic/bitops/__fls.h
tools/include/asm-generic/bitops/arch_hweight.h
tools/include/asm-generic/bitops/atomic.h
tools/include/asm-generic/bitops/const_hweight.h
tools/include/asm-generic/bitops/fls.h
tools/include/asm-generic/bitops/fls64.h
tools/include/asm-generic/bitsperlong.h [new file with mode: 0644]
tools/include/asm/alternative-asm.h [moved from tools/perf/util/include/asm/alternative-asm.h with 66% similarity]
tools/include/linux/bitops.h
tools/include/linux/compiler.h
tools/include/linux/hash.h
tools/include/linux/kernel.h
tools/include/linux/poison.h
tools/include/linux/string.h
tools/include/uapi/asm-generic/bitsperlong.h [new file with mode: 0644]
tools/include/uapi/linux/bpf.h [new file with mode: 0644]
tools/include/uapi/linux/bpf_common.h [new file with mode: 0644]
tools/include/uapi/linux/hw_breakpoint.h [new file with mode: 0644]
tools/include/uapi/linux/perf_event.h [new file with mode: 0644]
tools/lib/api/Makefile
tools/lib/api/fd/array.c
tools/lib/api/fd/array.h
tools/lib/api/fs/fs.c
tools/lib/api/fs/tracing_path.c
tools/lib/bpf/Makefile
tools/lib/bpf/bpf.c
tools/lib/bpf/bpf.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf.h
tools/lib/str_error_r.c [new file with mode: 0644]
tools/lib/subcmd/Makefile
tools/lib/subcmd/run-command.c
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/parse-filter.c
tools/lib/vsprintf.c [new file with mode: 0644]
tools/objtool/Build
tools/objtool/Makefile
tools/objtool/builtin-check.c
tools/objtool/elf.c
tools/perf/.gitignore
tools/perf/Documentation/android.txt
tools/perf/Documentation/perf-annotate.txt
tools/perf/Documentation/perf-buildid-cache.txt
tools/perf/Documentation/perf-data.txt
tools/perf/Documentation/perf-mem.txt
tools/perf/Documentation/perf-probe.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf-test.txt
tools/perf/Documentation/perf.data-file-format.txt [new file with mode: 0644]
tools/perf/MANIFEST
tools/perf/Makefile.perf
tools/perf/arch/arm/util/Build
tools/perf/arch/arm64/util/Build
tools/perf/arch/arm64/util/unwind-libunwind.c
tools/perf/arch/common.c
tools/perf/arch/common.h
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/arch/x86/tests/perf-time-to-tsc.c
tools/perf/arch/x86/tests/rdpmc.c
tools/perf/arch/x86/util/Build
tools/perf/arch/x86/util/auxtrace.c
tools/perf/arch/x86/util/group.c [new file with mode: 0644]
tools/perf/arch/x86/util/intel-bts.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/arch/x86/util/tsc.c
tools/perf/arch/x86/util/unwind-libunwind.c
tools/perf/bench/futex-hash.c
tools/perf/bench/futex-lock-pi.c
tools/perf/bench/futex-requeue.c
tools/perf/bench/futex-wake-parallel.c
tools/perf/bench/futex-wake.c
tools/perf/bench/mem-memcpy-x86-64-asm.S
tools/perf/bench/mem-memset-x86-64-asm.S
tools/perf/bench/numa.c
tools/perf/builtin-annotate.c
tools/perf/builtin-buildid-cache.c
tools/perf/builtin-config.c
tools/perf/builtin-data.c
tools/perf/builtin-diff.c
tools/perf/builtin-evlist.c
tools/perf/builtin-help.c
tools/perf/builtin-inject.c
tools/perf/builtin-kmem.c
tools/perf/builtin-kvm.c
tools/perf/builtin-list.c
tools/perf/builtin-mem.c
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/config/Makefile
tools/perf/jvmti/jvmti_agent.c
tools/perf/perf-sys.h
tools/perf/perf.c
tools/perf/perf.h
tools/perf/python/tracepoint.py [new file with mode: 0755]
tools/perf/scripts/python/bin/stackcollapse-record [new file with mode: 0755]
tools/perf/scripts/python/bin/stackcollapse-report [new file with mode: 0755]
tools/perf/scripts/python/stackcollapse.py [new file with mode: 0755]
tools/perf/tests/Build
tools/perf/tests/backward-ring-buffer.c
tools/perf/tests/bpf-script-example.c
tools/perf/tests/bpf.c
tools/perf/tests/builtin-test.c
tools/perf/tests/cpumap.c
tools/perf/tests/dso-data.c
tools/perf/tests/event-times.c
tools/perf/tests/evsel-roundtrip-name.c
tools/perf/tests/fdarray.c
tools/perf/tests/hists_cumulate.c
tools/perf/tests/hists_filter.c
tools/perf/tests/hists_link.c
tools/perf/tests/is_printable_array.c [new file with mode: 0644]
tools/perf/tests/llvm.c
tools/perf/tests/make
tools/perf/tests/mmap-basic.c
tools/perf/tests/openat-syscall-all-cpus.c
tools/perf/tests/openat-syscall-tp-fields.c
tools/perf/tests/openat-syscall.c
tools/perf/tests/parse-events.c
tools/perf/tests/parse-no-sample-id-all.c
tools/perf/tests/perf-record.c
tools/perf/tests/sdt.c [new file with mode: 0644]
tools/perf/tests/sw-clock.c
tools/perf/tests/switch-tracking.c
tools/perf/tests/task-exit.c
tools/perf/tests/tests.h
tools/perf/tests/thread-map.c
tools/perf/trace/beauty/eventfd.c
tools/perf/trace/beauty/flock.c
tools/perf/trace/beauty/futex_op.c
tools/perf/trace/beauty/mmap.c
tools/perf/trace/beauty/msg_flags.c
tools/perf/trace/beauty/open_flags.c
tools/perf/trace/beauty/sched_policy.c
tools/perf/trace/beauty/seccomp.c
tools/perf/ui/browser.c
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/browsers/hists.h [new file with mode: 0644]
tools/perf/ui/gtk/hists.c
tools/perf/ui/gtk/util.c
tools/perf/ui/helpline.c
tools/perf/ui/hist.c
tools/perf/ui/setup.c
tools/perf/ui/stdio/hist.c
tools/perf/ui/tui/setup.c
tools/perf/ui/ui.h
tools/perf/util/Build
tools/perf/util/alias.c
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/auxtrace.h
tools/perf/util/bpf-loader.c
tools/perf/util/bpf-loader.h
tools/perf/util/build-id.c
tools/perf/util/build-id.h
tools/perf/util/cache.h
tools/perf/util/callchain.h
tools/perf/util/cgroup.c
tools/perf/util/cloexec.c
tools/perf/util/color.c
tools/perf/util/config.c
tools/perf/util/config.h
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/data-convert-bt.c
tools/perf/util/data-convert-bt.h
tools/perf/util/data-convert.h [new file with mode: 0644]
tools/perf/util/data.c
tools/perf/util/db-export.c
tools/perf/util/debug.h
tools/perf/util/demangle-rust.c [new file with mode: 0644]
tools/perf/util/demangle-rust.h [new file with mode: 0644]
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/env.c
tools/perf/util/env.h
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/group.h [new file with mode: 0644]
tools/perf/util/header.c
tools/perf/util/help-unknown-cmd.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/asm/byteorder.h [deleted file]
tools/perf/util/include/asm/unistd_32.h [deleted file]
tools/perf/util/include/asm/unistd_64.h [deleted file]
tools/perf/util/include/linux/const.h [deleted file]
tools/perf/util/intel-bts.c
tools/perf/util/intel-pt-decoder/Build
tools/perf/util/intel-pt.c
tools/perf/util/intlist.h
tools/perf/util/jitdump.c
tools/perf/util/levenshtein.c
tools/perf/util/libunwind/arm64.c [new file with mode: 0644]
tools/perf/util/libunwind/x86_32.c [new file with mode: 0644]
tools/perf/util/llvm-utils.c
tools/perf/util/llvm-utils.h
tools/perf/util/machine.c
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/mem-events.c
tools/perf/util/mem-events.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/path.c
tools/perf/util/probe-event.c
tools/perf/util/probe-event.h
tools/perf/util/probe-file.c
tools/perf/util/probe-file.h
tools/perf/util/probe-finder.c
tools/perf/util/python-ext-sources
tools/perf/util/python.c
tools/perf/util/quote.c
tools/perf/util/quote.h
tools/perf/util/rb_resort.h
tools/perf/util/record.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/stat-shadow.c
tools/perf/util/stat.c
tools/perf/util/stat.h
tools/perf/util/strbuf.c
tools/perf/util/strbuf.h
tools/perf/util/strlist.h
tools/perf/util/symbol-elf.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/target.c
tools/perf/util/thread-stack.c
tools/perf/util/thread-stack.h
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/thread_map.c
tools/perf/util/trace-event.c
tools/perf/util/trace-event.h
tools/perf/util/unwind-libunwind-local.c [new file with mode: 0644]
tools/perf/util/unwind-libunwind.c
tools/perf/util/unwind.h
tools/perf/util/util.c
tools/perf/util/util.h
tools/perf/util/vdso.c
tools/testing/radix-tree/tag_check.c
tools/testing/selftests/rcutorture/bin/functions.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/bin/parse-console.sh
tools/testing/selftests/rcutorture/doc/initrd.txt
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/mpx-debug.h [new file with mode: 0644]
tools/testing/selftests/x86/mpx-dig.c [new file with mode: 0644]
tools/testing/selftests/x86/mpx-hw.h [new file with mode: 0644]
tools/testing/selftests/x86/mpx-mini-test.c [new file with mode: 0644]
tools/testing/selftests/x86/mpx-mm.h [new file with mode: 0644]
tools/testing/selftests/x86/test_mremap_vdso.c [new file with mode: 0644]
tools/vm/slabinfo.c

index 52489f56406941d569b8f55ec2af9c70fbc89206..d2acafb09e60c9a6a93382c5555e9376d24eaeeb 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -11,6 +11,7 @@ Aaron Durbin <adurbin@google.com>
 Adam Oldham <oldhamca@gmail.com>
 Adam Radford <aradford@gmail.com>
 Adrian Bunk <bunk@stusta.de>
+Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
 Alan Cox <alan@lxorguk.ukuu.org.uk>
 Alan Cox <root@hraefn.swansea.linux.org.uk>
 Aleksey Gorelov <aleksey_gorelov@phoenix.com>
@@ -94,6 +95,8 @@ Linas Vepstas <linas@austin.ibm.com>
 Mark Brown <broonie@sirena.org.uk>
 Matthieu CASTET <castet.matthieu@free.fr>
 Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com>
+Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com>
+Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
index 2483756fccf58ecbee5ed51a4d1c4af669b29411..aebda53ec0f775393b1a6afba6a4939af369a937 100644 (file)
@@ -19,3 +19,16 @@ KernelVersion:       4.4
 Description:
                High resolution timers directory. Creating a directory here
                will result in creating a hrtimer trigger in the IIO subsystem.
+
+What:          /config/iio/devices
+Date:          April 2016
+KernelVersion: 4.7
+Description:
+               Industrial IO software devices directory.
+
+What:          /config/iio/devices/dummy
+Date:          April 2016
+KernelVersion: 4.7
+Description:
+               Dummy IIO devices directory. Creating a directory here will result
+               in creating a dummy IIO device in the IIO subsystem.
index df44998e7506cc49ea54fe276bc9239a8b21d09f..fee35c00cc4ed6fd59fdff3fd14cac396e9336a4 100644 (file)
@@ -32,6 +32,13 @@ Description:
                Description of the physical chip / device for device X.
                Typically a part number.
 
+What:          /sys/bus/iio/devices/iio:deviceX/timestamp_clock
+KernelVersion: 4.5
+Contact:       linux-iio@vger.kernel.org
+Description:
+               String identifying current posix clock used to timestamp
+               buffered samples and events for device X.
+
 What:          /sys/bus/iio/devices/iio:deviceX/sampling_frequency
 What:          /sys/bus/iio/devices/iio:deviceX/buffer/sampling_frequency
 What:          /sys/bus/iio/devices/triggerX/sampling_frequency
@@ -1565,3 +1572,10 @@ Description:
                * X is in the plane of the propellers, perpendicular to Y axis,
                  and positive towards the starboard side of the UAV ;
                * Z is perpendicular to propellers plane and positive upwards.
+
+What:          /sys/bus/iio/devices/iio:deviceX/in_electricalconductivity_raw
+KernelVersion: 4.8
+Contact:       linux-iio@vger.kernel.org
+Description:
+               Raw (unscaled no offset etc.) electric conductivity reading that
+               can be processed to siemens per meter.
index 3740f253d406975ca7a6d13ebb96acd2cfc5d8f8..6adba9058b22b649d772381ae24634c1d8a0f940 100644 (file)
@@ -1,54 +1,41 @@
-What:          /sys/bus/iio/devices/iio:deviceX/tia_resistanceY
-               /sys/bus/iio/devices/iio:deviceX/tia_capacitanceY
-Date:          December 2015
-KernelVersion:
-Contact:       Andrew F. Davis <afd@ti.com>
-Description:
-               Get and set the resistance and the capacitance settings for the
-               Transimpedance Amplifier. Y is 1 for Rf1 and Cf1, Y is 2 for
-               Rf2 and Cf2 values.
-
-What:          /sys/bus/iio/devices/iio:deviceX/tia_separate_en
-Date:          December 2015
-KernelVersion:
-Contact:       Andrew F. Davis <afd@ti.com>
-Description:
-               Enable or disable separate settings for the TransImpedance
-               Amplifier above, when disabled both values are set by the
-               first channel.
-
-What:          /sys/bus/iio/devices/iio:deviceX/in_intensity_ledY_raw
-               /sys/bus/iio/devices/iio:deviceX/in_intensity_ledY_ambient_raw
-Date:          December 2015
+What:          /sys/bus/iio/devices/iio:deviceX/in_intensityY_raw
+Date:          May 2016
 KernelVersion:
 Contact:       Andrew F. Davis <afd@ti.com>
 Description:
                Get measured values from the ADC for these stages. Y is the
-               specific LED number. The values are expressed in 24-bit twos
-               complement.
+               specific stage number corresponding to datasheet stage names
+               as follows:
+               1 -> LED2
+               2 -> ALED2/LED3
+               3 -> LED1
+               4 -> ALED1/LED4
+               Note that channels 5 and 6 represent LED2-ALED2 and LED1-ALED1
+               respectively, which are simply helper channels containing the
+               calculated difference in the value of stage 1 - 2 and 3 - 4.
+               The values are expressed in 24-bit twos complement.
 
-What:          /sys/bus/iio/devices/iio:deviceX/in_intensity_ledY-ledY_ambient_raw
-Date:          December 2015
+What:          /sys/bus/iio/devices/iio:deviceX/in_intensityY_offset
+Date:          May 2016
 KernelVersion:
 Contact:       Andrew F. Davis <afd@ti.com>
 Description:
-               Get differential values from the ADC for these stages. Y is the
-               specific LED number. The values are expressed in 24-bit twos
-               complement for the specified LEDs.
+               Get and set the offset cancellation DAC setting for these
+               stages. The values are expressed in 5-bit sign-magnitude.
 
-What:          /sys/bus/iio/devices/iio:deviceX/out_current_ledY_offset
-               /sys/bus/iio/devices/iio:deviceX/out_current_ledY_ambient_offset
-Date:          December 2015
+What:          /sys/bus/iio/devices/iio:deviceX/in_intensityY_resistance
+What:          /sys/bus/iio/devices/iio:deviceX/in_intensityY_capacitance
+Date:          May 2016
 KernelVersion:
 Contact:       Andrew F. Davis <afd@ti.com>
 Description:
-               Get and set the offset cancellation DAC setting for these
-               stages. The values are expressed in 5-bit sign-magnitude.
+               Get and set the resistance and the capacitance settings for the
+               Transimpedance Amplifier during the associated stage.
 
-What:          /sys/bus/iio/devices/iio:deviceX/out_current_ledY_raw
-Date:          December 2015
+What:          /sys/bus/iio/devices/iio:deviceX/out_currentY_raw
+Date:          May 2016
 KernelVersion:
 Contact:       Andrew F. Davis <afd@ti.com>
 Description:
-               Get and set the LED current for the specified LED. Y is the
-               specific LED number.
+               Get and set the LED current for the specified LED active during
+               this stage. Y is the specific stage number.
index f525bf56d1dd0183d8614c9130fafe0196e82b0d..e2ab6a1f223e99ace1751eaddbde1d3736132486 100644 (file)
 
     irqreturn_t sensor_iio_pollfunc(int irq, void *p)
     {
-        pf->timestamp = iio_get_time_ns();
+        pf->timestamp = iio_get_time_ns((struct indio_dev *)p);
         return IRQ_WAKE_THREAD;
     }
 
index e7e24b3e86e29e2c721c447c237de890b11e7062..ece410f40436dc4f88fdf521bf84b7191b0ff798 100644 (file)
@@ -2391,6 +2391,41 @@ and <tt>RCU_NONIDLE()</tt> on the other while inspecting
 idle-loop code.
 Steven Rostedt supplied <tt>_rcuidle</tt> event tracing,
 which is used quite heavily in the idle loop.
+However, there are some restrictions on the code placed within
+<tt>RCU_NONIDLE()</tt>:
+
+<ol>
+<li>   Blocking is prohibited.
+       In practice, this is not a serious restriction given that idle
+       tasks are prohibited from blocking to begin with.
+<li>   Although nesting <tt>RCU_NONIDLE()</tt> is permitted, they cannot
+       nest indefinitely deeply.
+       However, given that they can be nested on the order of a million
+       deep, even on 32-bit systems, this should not be a serious
+       restriction.
+       This nesting limit would probably be reached long after the
+       compiler OOMed or the stack overflowed.
+<li>   Any code path that enters <tt>RCU_NONIDLE()</tt> must sequence
+       out of that same <tt>RCU_NONIDLE()</tt>.
+       For example, the following is grossly illegal:
+
+       <blockquote>
+       <pre>
+ 1     RCU_NONIDLE({
+ 2       do_something();
+ 3       goto bad_idea;  /* BUG!!! */
+ 4       do_something_else();});
+ 5   bad_idea:
+       </pre>
+       </blockquote>
+
+       <p>
+       It is just as illegal to transfer control into the middle of
+       <tt>RCU_NONIDLE()</tt>'s argument.
+       Yes, in theory, you could transfer in as long as you also
+       transferred out, but in practice you could also expect to get sharply
+       worded review comments.
+</ol>
 
 <p>
 It is similarly socially unacceptable to interrupt an
index 0f7fb4298e7e0b657a7df678ee0852de7ab7887d..e93d04133fe7ae58cfb6c3da3e0717b0640f3b9a 100644 (file)
@@ -49,7 +49,7 @@ rcupdate.rcu_task_stall_timeout
        This boot/sysfs parameter controls the RCU-tasks stall warning
        interval.  A value of zero or less suppresses RCU-tasks stall
        warnings.  A positive value sets the stall-warning interval
-       in jiffies.  An RCU-tasks stall warning starts wtih the line:
+       in jiffies.  An RCU-tasks stall warning starts with the line:
 
                INFO: rcu_tasks detected stalls on tasks:
 
index 111770ffa10e7cc4c3d2dada09e1e85f8d576099..204422719197ec080e13bd9f076e5c20d960c541 100644 (file)
@@ -5,6 +5,9 @@ to start learning about RCU:
 2.     What is RCU? Part 2: Usage   http://lwn.net/Articles/263130/
 3.     RCU part 3: the RCU API      http://lwn.net/Articles/264090/
 4.     The RCU API, 2010 Edition    http://lwn.net/Articles/418853/
+       2010 Big API Table           http://lwn.net/Articles/419086/
+5.     The RCU API, 2014 Edition    http://lwn.net/Articles/609904/
+       2014 Big API Table           http://lwn.net/Articles/609973/
 
 
 What is RCU?
index e27341f8a4c7e3a3a58a31f37914d2b9a21b9d88..7f3d94ae81ffb64f5b9de085d9f1015eca5b7b8a 100644 (file)
@@ -46,7 +46,8 @@ Optional properties:
     The second cell represents the MICBIAS to be used.
     The third cell represents the value of the micd-pol-gpio pin.
 
-  - wlf,gpsw : Settings for the general purpose switch
+  - wlf,gpsw : Settings for the general purpose switch, set as one of the
+    ARIZONA_GPSW_XXX defines.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/hwmon/apm-xgene-hwmon.txt b/Documentation/devicetree/bindings/hwmon/apm-xgene-hwmon.txt
new file mode 100644 (file)
index 0000000..59b3855
--- /dev/null
@@ -0,0 +1,14 @@
+APM X-Gene hwmon driver
+
+APM X-Gene SOC sensors are accessed over the "SLIMpro" mailbox.
+
+Required properties :
+ - compatible : should be "apm,xgene-slimpro-hwmon"
+ - mboxes : use the label reference for the mailbox as the first parameter.
+           The second parameter is the channel number.
+
+Example :
+       hwmonslimpro {
+               compatible = "apm,xgene-slimpro-hwmon";
+               mboxes = <&mailbox 7>;
+       };
diff --git a/Documentation/devicetree/bindings/hwmon/jc42.txt b/Documentation/devicetree/bindings/hwmon/jc42.txt
new file mode 100644 (file)
index 0000000..07a2504
--- /dev/null
@@ -0,0 +1,42 @@
+Properties for Jedec JC-42.4 compatible temperature sensors
+
+Required properties:
+- compatible: May include a device-specific string consisting of the
+             manufacturer and the name of the chip. A list of supported
+             chip names follows.
+             Must include "jedec,jc-42.4-temp" for any Jedec JC-42.4
+             compatible temperature sensor.
+
+             Supported chip names:
+               adi,adt7408
+               atmel,at30ts00
+               atmel,at30tse004
+               onnn,cat6095
+               onnn,cat34ts02
+               maxim,max6604
+               microchip,mcp9804
+               microchip,mcp9805
+               microchip,mcp9808
+               microchip,mcp98243
+               microchip,mcp98244
+               microchip,mcp9843
+               nxp,se97
+               nxp,se98
+               st,stts2002
+               st,stts2004
+               st,stts3000
+               st,stts424
+               st,stts424e
+               idt,tse2002
+               idt,tse2004
+               idt,ts3000
+               idt,ts3001
+
+- reg: I2C address
+
+Example:
+
+temp-sensor@1a {
+       compatible = "jedec,jc-42.4-temp";
+       reg = <0x1a>;
+};
index 539874490492de666274aff20fda6dc1660f681d..acc5cd64711c8f322db51d17797cbfe1fa471d91 100644 (file)
@@ -56,6 +56,70 @@ maxim,ds1050         5 Bit Programmable, Pulse-Width Modulator
 maxim,max1237          Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
 maxim,max6625          9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
 mc,rv3029c2            Real Time Clock Module with I2C-Bus
+microchip,mcp4531-502  Microchip 7-bit Single I2C Digital Potentiometer (5k)
+microchip,mcp4531-103  Microchip 7-bit Single I2C Digital Potentiometer (10k)
+microchip,mcp4531-503  Microchip 7-bit Single I2C Digital Potentiometer (50k)
+microchip,mcp4531-104  Microchip 7-bit Single I2C Digital Potentiometer (100k)
+microchip,mcp4532-502  Microchip 7-bit Single I2C Digital Potentiometer (5k)
+microchip,mcp4532-103  Microchip 7-bit Single I2C Digital Potentiometer (10k)
+microchip,mcp4532-503  Microchip 7-bit Single I2C Digital Potentiometer (50k)
+microchip,mcp4532-104  Microchip 7-bit Single I2C Digital Potentiometer (100k)
+microchip,mcp4541-502  Microchip 7-bit Single I2C Digital Potentiometer with NV Memory (5k)
+microchip,mcp4541-103  Microchip 7-bit Single I2C Digital Potentiometer with NV Memory (10k)
+microchip,mcp4541-503  Microchip 7-bit Single I2C Digital Potentiometer with NV Memory (50k)
+microchip,mcp4541-104  Microchip 7-bit Single I2C Digital Potentiometer with NV Memory (100k)
+microchip,mcp4542-502  Microchip 7-bit Single I2C Digital Potentiometer with NV Memory (5k)
+microchip,mcp4542-103  Microchip 7-bit Single I2C Digital Potentiometer with NV Memory (10k)
+microchip,mcp4542-503  Microchip 7-bit Single I2C Digital Potentiometer with NV Memory (50k)
+microchip,mcp4542-104  Microchip 7-bit Single I2C Digital Potentiometer with NV Memory (100k)
+microchip,mcp4551-502  Microchip 8-bit Single I2C Digital Potentiometer (5k)
+microchip,mcp4551-103  Microchip 8-bit Single I2C Digital Potentiometer (10k)
+microchip,mcp4551-503  Microchip 8-bit Single I2C Digital Potentiometer (50k)
+microchip,mcp4551-104  Microchip 8-bit Single I2C Digital Potentiometer (100k)
+microchip,mcp4552-502  Microchip 8-bit Single I2C Digital Potentiometer (5k)
+microchip,mcp4552-103  Microchip 8-bit Single I2C Digital Potentiometer (10k)
+microchip,mcp4552-503  Microchip 8-bit Single I2C Digital Potentiometer (50k)
+microchip,mcp4552-104  Microchip 8-bit Single I2C Digital Potentiometer (100k)
+microchip,mcp4561-502  Microchip 8-bit Single I2C Digital Potentiometer with NV Memory (5k)
+microchip,mcp4561-103  Microchip 8-bit Single I2C Digital Potentiometer with NV Memory (10k)
+microchip,mcp4561-503  Microchip 8-bit Single I2C Digital Potentiometer with NV Memory (50k)
+microchip,mcp4561-104  Microchip 8-bit Single I2C Digital Potentiometer with NV Memory (100k)
+microchip,mcp4562-502  Microchip 8-bit Single I2C Digital Potentiometer with NV Memory (5k)
+microchip,mcp4562-103  Microchip 8-bit Single I2C Digital Potentiometer with NV Memory (10k)
+microchip,mcp4562-503  Microchip 8-bit Single I2C Digital Potentiometer with NV Memory (50k)
+microchip,mcp4562-104  Microchip 8-bit Single I2C Digital Potentiometer with NV Memory (100k)
+microchip,mcp4631-502  Microchip 7-bit Dual I2C Digital Potentiometer (5k)
+microchip,mcp4631-103  Microchip 7-bit Dual I2C Digital Potentiometer (10k)
+microchip,mcp4631-503  Microchip 7-bit Dual I2C Digital Potentiometer (50k)
+microchip,mcp4631-104  Microchip 7-bit Dual I2C Digital Potentiometer (100k)
+microchip,mcp4632-502  Microchip 7-bit Dual I2C Digital Potentiometer (5k)
+microchip,mcp4632-103  Microchip 7-bit Dual I2C Digital Potentiometer (10k)
+microchip,mcp4632-503  Microchip 7-bit Dual I2C Digital Potentiometer (50k)
+microchip,mcp4632-104  Microchip 7-bit Dual I2C Digital Potentiometer (100k)
+microchip,mcp4641-502  Microchip 7-bit Dual I2C Digital Potentiometer with NV Memory (5k)
+microchip,mcp4641-103  Microchip 7-bit Dual I2C Digital Potentiometer with NV Memory (10k)
+microchip,mcp4641-503  Microchip 7-bit Dual I2C Digital Potentiometer with NV Memory (50k)
+microchip,mcp4641-104  Microchip 7-bit Dual I2C Digital Potentiometer with NV Memory (100k)
+microchip,mcp4642-502  Microchip 7-bit Dual I2C Digital Potentiometer with NV Memory (5k)
+microchip,mcp4642-103  Microchip 7-bit Dual I2C Digital Potentiometer with NV Memory (10k)
+microchip,mcp4642-503  Microchip 7-bit Dual I2C Digital Potentiometer with NV Memory (50k)
+microchip,mcp4642-104  Microchip 7-bit Dual I2C Digital Potentiometer with NV Memory (100k)
+microchip,mcp4651-502  Microchip 8-bit Dual I2C Digital Potentiometer (5k)
+microchip,mcp4651-103  Microchip 8-bit Dual I2C Digital Potentiometer (10k)
+microchip,mcp4651-503  Microchip 8-bit Dual I2C Digital Potentiometer (50k)
+microchip,mcp4651-104  Microchip 8-bit Dual I2C Digital Potentiometer (100k)
+microchip,mcp4652-502  Microchip 8-bit Dual I2C Digital Potentiometer (5k)
+microchip,mcp4652-103  Microchip 8-bit Dual I2C Digital Potentiometer (10k)
+microchip,mcp4652-503  Microchip 8-bit Dual I2C Digital Potentiometer (50k)
+microchip,mcp4652-104  Microchip 8-bit Dual I2C Digital Potentiometer (100k)
+microchip,mcp4661-502  Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (5k)
+microchip,mcp4661-103  Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (10k)
+microchip,mcp4661-503  Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (50k)
+microchip,mcp4661-104  Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (100k)
+microchip,mcp4662-502  Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (5k)
+microchip,mcp4662-103  Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (10k)
+microchip,mcp4662-503  Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (50k)
+microchip,mcp4662-104  Microchip 8-bit Dual I2C Digital Potentiometer with NV Memory (100k)
 national,lm63          Temperature sensor with integrated fan control
 national,lm75          I2C TEMP SENSOR
 national,lm80          Serial Interface ACPI-Compatible Microprocessor System Hardware Monitor
diff --git a/Documentation/devicetree/bindings/iio/adc/brcm,iproc-static-adc.txt b/Documentation/devicetree/bindings/iio/adc/brcm,iproc-static-adc.txt
new file mode 100644 (file)
index 0000000..caaaed7
--- /dev/null
@@ -0,0 +1,41 @@
+* Broadcom's IPROC Static ADC controller
+
+Broadcom iProc ADC controller has 8 channels 10bit ADC.
+Allows user to convert analog input voltage values to digital.
+
+Required properties:
+
+- compatible: Must be "brcm,iproc-static-adc"
+
+- adc-syscon: Handler of syscon node defining physical base address of the
+  controller and length of memory mapped region.
+
+- #io-channel-cells = <1>; As ADC has multiple outputs
+  refer to Documentation/devicetree/bindings/iio/iio-bindings.txt for details.
+
+- io-channel-ranges:
+  refer to Documentation/devicetree/bindings/iio/iio-bindings.txt for details.
+
+- clocks: Clock used for this block.
+
+- clock-names: Clock name should be given as tsc_clk.
+
+- interrupts: interrupt line number.
+
+For example:
+
+       ts_adc_syscon: ts_adc_syscon@180a6000 {
+               compatible = "brcm,iproc-ts-adc-syscon","syscon";
+               reg = <0x180a6000 0xc30>;
+       };
+
+       adc: adc@180a6000 {
+               compatible = "brcm,iproc-static-adc";
+               adc-syscon = <&ts_adc_syscon>;
+               #io-channel-cells = <1>;
+               io-channel-ranges;
+               clocks = <&asiu_clks BCM_CYGNUS_ASIU_ADC_CLK>;
+               clock-names = "tsc_clk";
+               interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
+               status = "disabled";
+       };
diff --git a/Documentation/devicetree/bindings/iio/adc/max1363.txt b/Documentation/devicetree/bindings/iio/adc/max1363.txt
new file mode 100644 (file)
index 0000000..94a9011
--- /dev/null
@@ -0,0 +1,63 @@
+* Maxim 1x3x/136x/116xx Analog to Digital Converter (ADC)
+
+The node for this driver must be a child node of an I2C controller, hence
+all mandatory properties for your controller must be specified. See directory:
+
+        Documentation/devicetree/bindings/i2c
+
+for more details.
+
+Required properties:
+  - compatible: Should be one of
+               "maxim,max1361"
+               "maxim,max1362"
+               "maxim,max1363"
+               "maxim,max1364"
+               "maxim,max1036"
+               "maxim,max1037"
+               "maxim,max1038"
+               "maxim,max1039"
+               "maxim,max1136"
+               "maxim,max1137"
+               "maxim,max1138"
+               "maxim,max1139"
+               "maxim,max1236"
+               "maxim,max1237"
+               "maxim,max1238"
+               "maxim,max1239"
+               "maxim,max11600"
+               "maxim,max11601"
+               "maxim,max11602"
+               "maxim,max11603"
+               "maxim,max11604"
+               "maxim,max11605"
+               "maxim,max11606"
+               "maxim,max11607"
+               "maxim,max11608"
+               "maxim,max11609"
+               "maxim,max11610"
+               "maxim,max11611"
+               "maxim,max11612"
+               "maxim,max11613"
+               "maxim,max11614"
+               "maxim,max11615"
+               "maxim,max11616"
+               "maxim,max11617"
+               "maxim,max11644"
+               "maxim,max11645"
+               "maxim,max11646"
+               "maxim,max11647"
+  - reg: Should contain the ADC I2C address
+
+Optional properties:
+  - vcc-supply: phandle to the regulator that provides power to the ADC.
+  - vref-supply: phandle to the regulator for ADC reference voltage.
+  - interrupts: IRQ line for the ADC. If not used the driver will use
+    polling.
+
+Example:
+adc: max11644@36 {
+       compatible = "maxim,max11644";
+       reg = <0x36>;
+       vref-supply = <&adc_vref>;
+};
diff --git a/Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt b/Documentation/devicetree/bindings/iio/chemical/atlas,ec-sm.txt
new file mode 100644 (file)
index 0000000..2962bd9
--- /dev/null
@@ -0,0 +1,22 @@
+* Atlas Scientific EC-SM OEM sensor
+
+http://www.atlas-scientific.com/_files/_datasheets/_oem/EC_oem_datasheet.pdf
+
+Required properties:
+
+  - compatible: must be "atlas,ec-sm"
+  - reg: the I2C address of the sensor
+  - interrupt-parent: should be the phandle for the interrupt controller
+  - interrupts: the sole interrupt generated by the device
+
+  Refer to interrupt-controller/interrupts.txt for generic interrupt client
+  node bindings.
+
+Example:
+
+atlas@64 {
+       compatible = "atlas,ec-sm";
+       reg = <0x64>;
+       interrupt-parent = <&gpio1>;
+       interrupts = <16 2>;
+};
diff --git a/Documentation/devicetree/bindings/iio/dac/ad5755.txt b/Documentation/devicetree/bindings/iio/dac/ad5755.txt
new file mode 100644 (file)
index 0000000..f0bbd7e
--- /dev/null
@@ -0,0 +1,124 @@
+* Analog Device AD5755 IIO Multi-Channel DAC Linux Driver
+
+Required properties:
+ - compatible: Has to contain one of the following:
+       adi,ad5755
+       adi,ad5755-1
+       adi,ad5757
+       adi,ad5735
+       adi,ad5737
+
+ - reg: spi chip select number for the device
+ - spi-cpha or spi-cpol: these are the only modes that are supported
+
+Recommended properties:
+ - spi-max-frequency: Definition as per
+               Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Optional properties:
+See include/dt-bindings/iio/ad5755.h
+ - adi,ext-dc-dc-compenstation-resistor: boolean set if the hardware has an
+                                        external resistor and thereby bypasses
+                                        the internal compensation resistor.
+ - adi,dc-dc-phase:
+       Valid values for DC DC Phase control are:
+       0: All dc-to-dc converters clock on the same edge.
+       1: Channel A and Channel B clock on the same edge,
+          Channel C and Channel D clock on opposite edges.
+       2: Channel A and Channel C clock on the same edge,
+          Channel B and Channel D clock on opposite edges.
+       3: Channel A, Channel B, Channel C, and Channel D
+          clock 90 degrees out of phase from each other.
+ - adi,dc-dc-freq-hz:
+       Valid values for DC DC frequency are [Hz]:
+       250000
+       410000
+       650000
+ - adi,dc-dc-max-microvolt:
+       Valid values for the maximum allowed Vboost voltage supplied by
+       the dc-to-dc converter is:
+       23000000
+       24500000
+       27000000
+       29500000
+
+Optional for every channel:
+ - adi,mode:
+       Valid values for DAC modes are:
+       0: 0 V to 5 V voltage range.
+       1: 0 V to 10 V voltage range.
+       2: Plus minus 5 V voltage range.
+       3: Plus minus 10 V voltage range.
+       4: 4 mA to 20 mA current range.
+       5: 0 mA to 20 mA current range.
+       6: 0 mA to 24 mA current range.
+ - adi,ext-current-sense-resistor: boolean set if the hardware has an external
+                                  current sense resistor.
+ - adi,enable-voltage-overrange: boolean enable voltage overrange
+ - adi,slew: Array of slewrate settings should contain 3 fields:
+       1: Should be either 0 or 1 in order to enable or disable slewrate.
+       2: Slew rate settings:
+               Valid values for the slew rate update frequency:
+               64000
+               32000
+               16000
+               8000
+               4000
+               2000
+               1000
+               500
+               250
+               125
+               64
+               32
+               16
+               8
+               4
+               0
+       3: Slew step size:
+               Valid values for the step size LSBs:
+               1
+               2
+               4
+               16
+               32
+               64
+               128
+               256
+
+Example:
+dac@0 {
+       #address-cells = <1>;
+       #size-cells = <0>;
+       compatible = "adi,ad5755";
+       reg = <0>;
+       spi-max-frequency = <1000000>;
+       spi-cpha;
+       adi,dc-dc-phase = <0>;
+       adi,dc-dc-freq-hz = <410000>;
+       adi,dc-dc-max-microvolt = <23000000>;
+       channel@0 {
+               reg = <0>;
+               adi,mode = <4>;
+               adi,ext-current-sense-resistor;
+               adi,slew = <0 64000 1>;
+       };
+       channel@1 {
+               reg = <1>;
+               adi,mode = <4>;
+               adi,ext-current-sense-resistor;
+               adi,slew = <0 64000 1>;
+       };
+       channel@2 {
+               reg = <2>;
+               adi,mode = <4>;
+               adi,ext-current-sense-resistor;
+               adi,slew = <0 64000 1>;
+       };
+       channel@3 {
+               reg = <3>;
+               adi,mode = <4>;
+               adi,ext-current-sense-resistor;
+               adi,slew = <0 64000 1>;
+       };
+};
index d7a6deb6b21e3d2e29d47a73081acaf4463d4d5e..c7198a03c906250a710bc268d28dc6b542b41f66 100644 (file)
@@ -1,7 +1,11 @@
-BMP085/BMP18x digital pressure sensors
+BMP085/BMP18x/BMP28x digital pressure sensors
 
 Required properties:
-- compatible: bosch,bmp085
+- compatible: must be one of:
+  "bosch,bmp085"
+  "bosch,bmp180"
+  "bosch,bmp280"
+  "bosch,bme280"
 
 Optional properties:
 - chip-id: configurable chip id for non-default chip revisions
@@ -10,6 +14,10 @@ Optional properties:
   value range is 0-3 with rising sensitivity.
 - interrupt-parent: should be the phandle for the interrupt controller
 - interrupts: interrupt mapping for IRQ
+- reset-gpios: a GPIO line handling reset of the sensor: as the line is
+  active low, it should be marked GPIO_ACTIVE_LOW (see gpio/gpio.txt)
+- vddd-supply: digital voltage regulator (see regulator/regulator.txt)
+- vdda-supply: analog voltage regulator (see regulator/regulator.txt)
 
 Example:
 
@@ -21,4 +29,7 @@ pressure@77 {
        default-oversampling = <2>;
        interrupt-parent = <&gpio0>;
        interrupts = <25 IRQ_TYPE_EDGE_RISING>;
+       reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>;
+       vddd-supply = <&foo>;
+       vdda-supply = <&bar>;
 };
index 5844cf72862de4a543b4519387e2e0147ab4f626..e41fe340162bdd8b262a54f1d9376b58ba73d513 100644 (file)
@@ -64,3 +64,4 @@ Pressure sensors:
 - st,lps001wp-press
 - st,lps25h-press
 - st,lps331ap-press
+- st,lps22hb-press
diff --git a/Documentation/devicetree/bindings/phy/phy-da8xx-usb.txt b/Documentation/devicetree/bindings/phy/phy-da8xx-usb.txt
new file mode 100644 (file)
index 0000000..c26478b
--- /dev/null
@@ -0,0 +1,40 @@
+TI DA8xx/OMAP-L1xx/AM18xx USB PHY
+
+Required properties:
+ - compatible: must be "ti,da830-usb-phy".
+ - #phy-cells: must be 1.
+
+This device controls the PHY for both the USB 1.1 OHCI and USB 2.0 OTG
+controllers on DA8xx SoCs. Consumers of this device should use index 0 for
+the USB 2.0 phy device and index 1 for the USB 1.1 phy device.
+
+It also requires a "syscon" node with compatible = "ti,da830-cfgchip", "syscon"
+to access the CFGCHIP2 register.
+
+Example:
+
+       cfgchip: cfgchip@1417c {
+               compatible = "ti,da830-cfgchip", "syscon";
+               reg = <0x1417c 0x14>;
+       };
+
+       usb_phy: usb-phy {
+               compatible = "ti,da830-usb-phy";
+               #phy-cells = <1>;
+       };
+
+       usb20: usb@200000 {
+               compatible = "ti,da830-musb";
+               reg = <0x200000 0x1000>;
+               interrupts = <58>;
+               phys = <&usb_phy 0>;
+               phy-names = "usb-phy";
+       };
+
+       usb11: usb@225000 {
+               compatible = "ti,da830-ohci";
+               reg = <0x225000 0x1000>;
+               interrupts = <59>;
+               phys = <&usb_phy 1>;
+               phy-names = "usb-phy";
+       };
index 68498d5603540343ffbf6c3d72f0c8fc65dc1a83..cc6be9680a6d6859852c27f1ab0d31a898b4eb03 100644 (file)
@@ -5,11 +5,13 @@ Required properties:
      "rockchip,rk3066a-usb-phy"
      "rockchip,rk3188-usb-phy"
      "rockchip,rk3288-usb-phy"
- - rockchip,grf : phandle to the syscon managing the "general
-   register files"
  - #address-cells: should be 1
  - #size-cells: should be 0
 
+Deprecated properties:
+ - rockchip,grf : phandle to the syscon managing the "general
+   register files" - phy should be a child of the GRF instead
+
 Sub-nodes:
 Each PHY should be represented as a sub-node.
 
@@ -28,14 +30,19 @@ Optional Properties:
 
 Example:
 
-usbphy: phy {
-       compatible = "rockchip,rk3288-usb-phy";
-       rockchip,grf = <&grf>;
-       #address-cells = <1>;
-       #size-cells = <0>;
+grf: syscon@ff770000 {
+       compatible = "rockchip,rk3288-grf", "syscon", "simple-mfd";
+
+...
+
+       usbphy: phy {
+               compatible = "rockchip,rk3288-usb-phy";
+               #address-cells = <1>;
+               #size-cells = <0>;
 
-       usbphy0: usb-phy0 {
-               #phy-cells = <0>;
-               reg = <0x320>;
+               usbphy0: usb-phy0 {
+                       #phy-cells = <0>;
+                       reg = <0x320>;
+               };
        };
 };
index 936ab5b87324ce7cf022320a777342d2507dfd19..f5561ac7e17ed358ce7e6a00c7c0f8913f3464c6 100644 (file)
@@ -42,6 +42,9 @@ Optional properties:
 - auto-flow-control: one way to enable automatic flow control support. The
   driver is allowed to detect support for the capability even without this
   property.
+- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD
+  line respectively. It will use specified GPIO instead of the peripheral
+  function pin for the UART feature. If unsure, don't specify this property.
 
 Note:
 * fsl,ns16550:
@@ -63,3 +66,19 @@ Example:
                interrupts = <10>;
                reg-shift = <2>;
        };
+
+Example for OMAP UART using GPIO-based modem control signals:
+
+       uart4: serial@49042000 {
+               compatible = "ti,omap3-uart";
+               reg = <0x49042000 0x400>;
+               interrupts = <80>;
+               ti,hwmods = "uart4";
+               clock-frequency = <48000000>;
+               cts-gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
+               rts-gpios = <&gpio3 6 GPIO_ACTIVE_LOW>;
+               dtr-gpios = <&gpio1 12 GPIO_ACTIVE_LOW>;
+               dsr-gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
+               dcd-gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+               rng-gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+       };
index 528c3b90f23cb04b931be9637f850e7e315e0415..1e4000d83aee06828c974000e5122567b8fda631 100644 (file)
@@ -31,6 +31,8 @@ Required properties:
     - "renesas,hscif-r8a7794" for R8A7794 (R-Car E2) HSCIF compatible UART.
     - "renesas,scif-r8a7795" for R8A7795 (R-Car H3) SCIF compatible UART.
     - "renesas,hscif-r8a7795" for R8A7795 (R-Car H3) HSCIF compatible UART.
+    - "renesas,scif-r8a7796" for R8A7796 (R-Car M3-W) SCIF compatible UART.
+    - "renesas,hscif-r8a7796" for R8A7796 (R-Car M3-W) HSCIF compatible UART.
     - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
     - "renesas,scifb-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFB compatible UART.
     - "renesas,rcar-gen1-scif" for R-Car Gen1 SCIF compatible UART,
@@ -76,6 +78,10 @@ Optional properties:
   - dmas: Must contain a list of two references to DMA specifiers, one for
          transmission, and one for reception.
   - dma-names: Must contain a list of two DMA names, "tx" and "rx".
+  - {cts,dsr,dcd,rng,rts,dtr}-gpios: Specify GPIOs for modem lines, cfr. the
+    generic serial DT bindings in serial.txt.
+  - uart-has-rtscts: Indicates dedicated lines for RTS/CTS hardware flow
+    control, cfr. the generic serial DT bindings in serial.txt.
 
 Example:
        aliases {
diff --git a/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt b/Documentation/devicetree/bindings/timer/oxsemi,rps-timer.txt
new file mode 100644 (file)
index 0000000..3ca89cd
--- /dev/null
@@ -0,0 +1,17 @@
+Oxford Semiconductor OXNAS SoCs Family RPS Timer
+================================================
+
+Required properties:
+- compatible: Should be "oxsemi,ox810se-rps-timer"
+- reg : Specifies base physical address and size of the registers.
+- interrupts : The interrupts of the two timers
+- clocks : The phandle of the timer clock source
+
+example:
+
+timer0: timer@200 {
+       compatible = "oxsemi,ox810se-rps-timer";
+       reg = <0x200 0x40>;
+       clocks = <&rpsclk>;
+       interrupts = <4 5>;
+};
similarity index 75%
rename from Documentation/devicetree/bindings/timer/rockchip,rk3288-timer.txt
rename to Documentation/devicetree/bindings/timer/rockchip,rk-timer.txt
index 87f0b0042bae8650711729b0994766b0e2cc5e43..a41b184d553843679323b36024c7d2653ca26f4a 100644 (file)
@@ -1,7 +1,9 @@
-Rockchip rk3288 timer
+Rockchip rk timer
 
 Required properties:
-- compatible: shall be "rockchip,rk3288-timer"
+- compatible: shall be one of:
+  "rockchip,rk3288-timer" - for rk3066, rk3036, rk3188, rk322x, rk3288, rk3368
+  "rockchip,rk3399-timer" - for rk3399
 - reg: base address of the timer register starting with TIMERS CONTROL register
 - interrupts: should contain the interrupts for Timer0
 - clocks : must contain an entry for each entry in clock-names
index 1084e2bcbe1c244dea37cc6161ad7551c2911f7b..341dc67f3472c746a92e38340848a509f8140831 100644 (file)
@@ -93,7 +93,7 @@ Example:
                phys = <&usb_phy0>;
                phy-names = "usb-phy";
                vbus-supply = <&reg_usb0_vbus>;
-               gadget-itc-setting = <0x4>; /* 4 micro-frames */
+               itc-setting = <0x4>; /* 4 micro-frames */
                 /* Incremental burst of unspecified length */
                ahb-burst-config = <0x0>;
                tx-burst-size-dword = <0x10>; /* 64 bytes */
index 19233b7365e14d08bc3be06be1f7f8b31048ad67..9df456968596fbbfebde061d13a21583dab233dc 100644 (file)
@@ -14,7 +14,7 @@ Optional properties:
 - clocks : a list of phandle + clock specifier pairs
 - phys : phandle + phy specifier pair
 - phy-names : "usb"
-- resets : phandle + reset specifier pair
+- resets : a list of phandle + reset specifier pairs
 
 Example:
 
index 4ab7d43d07544f7a22b35e49f9eb691eb3e9204d..7050ce8794b9a4b3dd93b76dd9e2a6d708b468ee 100644 (file)
@@ -139,27 +139,6 @@ Examples of using the Linux-provided gdb helpers
       start_comm = "swapper/2\000\000\000\000\000\000"
     }
 
- o Dig into a radix tree data structure, such as the IRQ descriptors:
-    (gdb) print (struct irq_desc)$lx_radix_tree_lookup(irq_desc_tree, 18)
-    $6 = {
-      irq_common_data = {
-        state_use_accessors = 67584,
-        handler_data = 0x0 <__vectors_start>,
-        msi_desc = 0x0 <__vectors_start>,
-        affinity = {{
-            bits = {65535}
-          }}
-      },
-      irq_data = {
-        mask = 0,
-        irq = 18,
-        hwirq = 27,
-        common = 0xee803d80,
-        chip = 0xc0eb0854 <gic_data>,
-        domain = 0xee808000,
-        parent_data = 0x0 <__vectors_start>,
-        chip_data = 0xc0eb0854 <gic_data>
-      } <... trimmed ...>
 
 List of commands and functions
 ------------------------------
index f1d4fe4c366c487ba0705647bcafc33677b40a03..44013d23b3f05a569ce1c6ce6072c6fe78f2258f 100644 (file)
@@ -24,7 +24,7 @@ Supported chips:
                                 AW9D-MAX) (2)
        1) For revisions 2 and 3 uGuru's the driver can autodetect the
           sensortype (Volt or Temp) for bank1 sensors, for revision 1 uGuru's
-          this doesnot always work. For these uGuru's the autodection can
+          this does not always work. For these uGuru's the autodetection can
           be overridden with the bank1_types module param. For all 3 known
           revison 1 motherboards the correct use of this param is:
           bank1_types=1,1,0,0,0,0,0,2,0,0,0,0,2,0,0,1
diff --git a/Documentation/hwmon/ftsteutates b/Documentation/hwmon/ftsteutates
new file mode 100644 (file)
index 0000000..2a1bf69
--- /dev/null
@@ -0,0 +1,23 @@
+Kernel driver ftsteutates
+=========================
+
+Supported chips:
+  * FTS Teutates
+    Prefix: 'ftsteutates'
+    Addresses scanned: I2C 0x73 (7-Bit)
+
+Author: Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>
+
+
+Description
+-----------
+The BMC Teutates is the Eleventh generation of Superior System
+monitoring and thermal management solution. It builds on the basic
+functionality of the BMC Theseus and contains several new features and
+enhancements. It can monitor up to 4 voltages, 16 temperatures and
+8 fans. It also contains an integrated watchdog which is currently
+implemented in this driver.
+
+Specification of the chip can be found here:
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
diff --git a/Documentation/hwmon/ina3221 b/Documentation/hwmon/ina3221
new file mode 100644 (file)
index 0000000..0ff7485
--- /dev/null
@@ -0,0 +1,35 @@
+Kernel driver ina3221
+=====================
+
+Supported chips:
+  * Texas Instruments INA3221
+    Prefix: 'ina3221'
+    Addresses: I2C 0x40 - 0x43
+    Datasheet: Publicly available at the Texas Instruments website
+               http://www.ti.com/
+
+Author: Andrew F. Davis <afd@ti.com>
+
+Description
+-----------
+
+The Texas Instruments INA3221 monitors voltage, current, and power on the high
+side of up to three D.C. power supplies. The INA3221 monitors both shunt drop
+and supply voltage, with programmable conversion times and averaging, current
+and power are calculated host-side from these.
+
+Sysfs entries
+-------------
+
+in[123]_input           Bus voltage(mV) channels
+curr[123]_input         Current(mA) measurement channels
+shunt[123]_resistor     Shunt resistance(uOhm) channels
+curr[123]_crit          Critical alert current(mA) setting, activates the
+                          corresponding alarm when the respective current
+                          is above this value
+curr[123]_crit_alarm    Critical alert current limit exceeded
+curr[123]_max           Warning alert current(mA) setting, activates the
+                          corresponding alarm when the respective current
+                          average is above this value.
+curr[123]_max_alarm     Warning alert current limit exceeded
+in[456]_input           Shunt voltage(uV) for channels 1, 2, and 3 respectively
index f7f1830a25663c6273812abe9cbddf8245a050d4..b4b671f22453e6132fc376f8f9fa81996b07707c 100644 (file)
@@ -18,10 +18,11 @@ Supported chips:
   * Maxim MAX6604
     Datasheets:
        http://datasheets.maxim-ic.com/en/ds/MAX6604.pdf
-  * Microchip MCP9804, MCP9805, MCP98242, MCP98243, MCP98244, MCP9843
+  * Microchip MCP9804, MCP9805, MCP9808, MCP98242, MCP98243, MCP98244, MCP9843
     Datasheets:
        http://ww1.microchip.com/downloads/en/DeviceDoc/22203C.pdf
        http://ww1.microchip.com/downloads/en/DeviceDoc/21977b.pdf
+       http://ww1.microchip.com/downloads/en/DeviceDoc/25095A.pdf
        http://ww1.microchip.com/downloads/en/DeviceDoc/21996a.pdf
        http://ww1.microchip.com/downloads/en/DeviceDoc/22153c.pdf
        http://ww1.microchip.com/downloads/en/DeviceDoc/22327A.pdf
index 0616ed9758dfdd96058018191447973a54bb3a5c..8f9d570dbfec350c09abea219d334987276004b6 100644 (file)
@@ -17,7 +17,7 @@ This driver implements support for the Maxim MAX1668, MAX1805 and MAX1989
 chips.
 
 The three devices are very similar, but the MAX1805 has a reduced feature
-set; only two remote temperature inputs vs the four avaible on the other
+set; only two remote temperature inputs vs the four available on the other
 two ICs.
 
 The driver is able to distinguish between the devices and creates sysfs
diff --git a/Documentation/hwmon/sht3x b/Documentation/hwmon/sht3x
new file mode 100644 (file)
index 0000000..b0d8818
--- /dev/null
@@ -0,0 +1,76 @@
+Kernel driver sht3x
+===================
+
+Supported chips:
+  * Sensirion SHT3x-DIS
+    Prefix: 'sht3x'
+    Addresses scanned: none
+    Datasheet: http://www.sensirion.com/fileadmin/user_upload/customers/sensirion/Dokumente/Humidity/Sensirion_Humidity_Datasheet_SHT3x_DIS.pdf
+
+Author:
+  David Frey <david.frey@sensirion.com>
+  Pascal Sachs <pascal.sachs@sensirion.com>
+
+Description
+-----------
+
+This driver implements support for the Sensirion SHT3x-DIS chip, a humidity
+and temperature sensor. Temperature is measured in degrees celsius, relative
+humidity is expressed as a percentage. In the sysfs interface, all values are
+scaled by 1000, i.e. the value for 31.5 degrees celsius is 31500.
+
+The device communicates with the I2C protocol. Sensors can have the I2C
+addresses 0x44 or 0x45, depending on the wiring. See
+Documentation/i2c/instantiating-devices for methods to instantiate the device.
+
+There are two options configurable by means of sht3x_platform_data:
+1. blocking (pull the I2C clock line down while performing the measurement) or
+   non-blocking mode. Blocking mode will guarantee the fastest result but
+   the I2C bus will be busy during that time. By default, non-blocking mode
+   is used. Make sure clock-stretching works properly on your device if you
+   want to use blocking mode.
+2. high or low accuracy. High accuracy is used by default and using it is
+   strongly recommended.
+
+The sht3x sensor supports a single shot mode as well as 5 periodic measure
+modes, which can be controlled with the update_interval sysfs interface.
+The allowed update_interval values in milliseconds are as follows:
+  *     0   single shot mode
+  *  2000   0.5 Hz periodic measurement
+  *  1000   1   Hz periodic measurement
+  *   500   2   Hz periodic measurement
+  *   250   4   Hz periodic measurement
+  *   100  10   Hz periodic measurement
+
+In the periodic measure mode, the sensor automatically triggers a measurement
+with the configured update interval on the chip. When a temperature or humidity
+reading exceeds the configured limits, the alert attribute is set to 1 and
+the alert pin on the sensor is set to high.
+When the temperature and humidity readings move back between the hysteresis
+values, the alert bit is set to 0 and the alert pin on the sensor is set to
+low.
+
+sysfs-Interface
+---------------
+
+temp1_input:        temperature input
+humidity1_input:    humidity input
+temp1_max:          temperature max value
+temp1_max_hyst:     temperature hysteresis value for max limit
+humidity1_max:      humidity max value
+humidity1_max_hyst: humidity hysteresis value for max limit
+temp1_min:          temperature min value
+temp1_min_hyst:     temperature hysteresis value for min limit
+humidity1_min:      humidity min value
+humidity1_min_hyst: humidity hysteresis value for min limit
+temp1_alarm:        alarm flag is set to 1 if the temperature is outside the
+                    configured limits. Alarm only works in periodic measure mode
+humidity1_alarm:    alarm flag is set to 1 if the humidity is outside the
+                    configured limits. Alarm only works in periodic measure mode
+heater_enable:      heater enable, heating element removes excess humidity from
+                    sensor
+                        0: turned off
+                        1: turned on
+update_interval:    update interval, 0 for single shot, interval in msec
+                    for periodic measurement. If the interval is not supported
+                    by the sensor, the next faster interval is chosen
index d201828d202ff8de99579ee64cd24b2ce89a391d..57f60307accc6b7ea5fb255479b3337635e67b9c 100644 (file)
@@ -15,10 +15,15 @@ increase the chances of your change being accepted.
     Documentation/SubmittingPatches
     Documentation/CodingStyle
 
-* If your patch generates checkpatch warnings, please refrain from explanations
-  such as "I don't like that coding style". Keep in mind that each unnecessary
-  warning helps hiding a real problem. If you don't like the kernel coding
-  style, don't write kernel drivers.
+* Please run your patch through 'checkpatch --strict'. There should be no
+  errors, no warnings, and few if any check messages. If there are any
+  messages, please be prepared to explain.
+
+* If your patch generates checkpatch errors, warnings, or check messages,
+  please refrain from explanations such as "I prefer that coding style".
+  Keep in mind that each unnecessary message helps hiding a real problem,
+  and a consistent coding style makes it easier for others to understand
+  and review the code.
 
 * Please test your patch thoroughly. We are not your test group.
   Sometimes a patch can not or not completely be tested because of missing
@@ -61,15 +66,30 @@ increase the chances of your change being accepted.
 
 * Make sure that all dependencies are listed in Kconfig.
 
+* Please list include files in alphabetic order.
+
+* Please align continuation lines with '(' on the previous line.
+
 * Avoid forward declarations if you can. Rearrange the code if necessary.
 
+* Avoid macros to generate groups of sensor attributes. It not only confuses
+  checkpatch, but also makes it more difficult to review the code.
+
 * Avoid calculations in macros and macro-generated functions. While such macros
   may save a line or so in the source, it obfuscates the code and makes code
   review more difficult. It may also result in code which is more complicated
   than necessary. Use inline functions or just regular functions instead.
 
+* Limit the number of kernel log messages. In general, your driver should not
+  generate an error message just because a runtime operation failed. Report
+  errors to user space instead, using an appropriate error code. Keep in mind
+  that kernel error log messages not only fill up the kernel log, but also are
+  printed synchronously, most likely with interrupt disabled, often to a serial
+  console. Excessive logging can seriously affect system performance.
+
 * Use devres functions whenever possible to allocate resources. For rationale
   and supported functions, please see Documentation/driver-model/devres.txt.
+  If a function is not supported by devres, consider using devm_add_action().
 
 * If the driver has a detect function, make sure it is silent. Debug messages
   and messages printed after a successful detection are acceptable, but it
@@ -96,8 +116,16 @@ increase the chances of your change being accepted.
   writing to it might cause a bad misconfiguration.
 
 * Make sure there are no race conditions in the probe function. Specifically,
-  completely initialize your chip first, then create sysfs entries and register
-  with the hwmon subsystem.
+  completely initialize your chip and your driver first, then register with
+  the hwmon subsystem.
+
+* Use devm_hwmon_device_register_with_groups() or, if your driver needs a remove
+  function, hwmon_device_register_with_groups() to register your driver with the
+  hwmon subsystem. Try using devm_add_action() instead of a remove function if
+  possible. Do not use hwmon_device_register().
+
+* Your driver should be buildable as module. If not, please be prepared to
+  explain why it has to be built into the kernel.
 
 * Do not provide support for deprecated sysfs attributes.
 
index 711f75e189eba003e31a3f762fa16be04d1012d0..2d9ca42213cf31e9b89d2f8af2d340a231ddc2f5 100644 (file)
@@ -22,6 +22,9 @@ Supported chips:
     Prefix: 'tmp435'
     Addresses scanned: I2C 0x48 - 0x4f
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
+  * Texas Instruments TMP461
+    Prefix: 'tmp461'
+    Datasheet: http://www.ti.com/product/tmp461
 
 Authors:
          Hans de Goede <hdegoede@redhat.com>
@@ -31,8 +34,8 @@ Description
 -----------
 
 This driver implements support for Texas Instruments TMP401, TMP411,
-TMP431, TMP432 and TMP435 chips. These chips implement one or two remote
-and one local temperature sensors. Temperature is measured in degrees
+TMP431, TMP432, TMP435, and TMP461 chips. These chips implement one or two
+remote and one local temperature sensors. Temperature is measured in degrees
 Celsius. Resolution of the remote sensor is 0.0625 degree. Local
 sensor resolution can be set to 0.5, 0.25, 0.125 or 0.0625 degree (not
 supported by the driver so far, so using the default resolution of 0.5
@@ -55,3 +58,10 @@ some additional features.
 
 TMP432 is compatible with TMP401 and TMP431. It supports two external
 temperature sensors.
+
+TMP461 is compatible with TMP401. It supports offset correction
+that is applied to the remote sensor.
+
+* Sensor offset values are temperature values
+
+  Exported via sysfs attribute tempX_offset
index 13f888a02a3de5cb7ecfdb2ed03eb1d721601de9..385a5ef41c17b9d0023e39d5f0683b6b76199373 100644 (file)
@@ -47,6 +47,7 @@ This document describes the Linux kernel Makefiles.
                --- 7.2 genhdr-y
                --- 7.3 destination-y
                --- 7.4 generic-y
+               --- 7.5 generated-y
 
        === 8 Kbuild Variables
        === 9 Makefile language
@@ -1319,6 +1320,19 @@ See subsequent chapter for the syntax of the Kbuild file.
                Example: termios.h
                        #include <asm-generic/termios.h>
 
+       --- 7.5 generated-y
+
+       If an architecture generates other header files alongside generic-y
+       wrappers, and not included in genhdr-y, then generated-y specifies
+       them.
+
+       This prevents them being treated as stale asm-generic wrappers and
+       removed.
+
+               Example:
+                       #arch/x86/include/asm/Kbuild
+                       generated-y += syscalls_32.h
+
 === 8 Kbuild Variables
 
 The top Makefile exports the following variables:
index 82b42c958d1c7def4eac5c9931a0e8a6f9aab6c6..17e33dbbf2264d985e2ebc34f89c99d3e01e8f3b 100644 (file)
@@ -687,6 +687,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        [SPARC64] tick
                        [X86-64] hpet,tsc
 
+       clocksource.arm_arch_timer.evtstrm=
+                       [ARM,ARM64]
+                       Format: <bool>
+                       Enable/disable the eventstream feature of the ARM
+                       architected timer so that code using WFE-based polling
+                       loops can be debugged more effectively on production
+                       systems.
+
        clearcpuid=BITNUM [X86]
                        Disable CPUID feature X for the kernel. See
                        arch/x86/include/asm/cpufeatures.h for the valid bit
@@ -1803,12 +1811,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        js=             [HW,JOY] Analog joystick
                        See Documentation/input/joystick.txt.
 
-       kaslr/nokaslr   [X86]
-                       Enable/disable kernel and module base offset ASLR
-                       (Address Space Layout Randomization) if built into
-                       the kernel. When CONFIG_HIBERNATION is selected,
-                       kASLR is disabled by default. When kASLR is enabled,
-                       hibernation will be disabled.
+       nokaslr         [KNL]
+                       When CONFIG_RANDOMIZE_BASE is set, this disables
+                       kernel and module base offset ASLR (Address Space
+                       Layout Randomization).
 
        keepinitrd      [HW,ARM]
 
index 147ae8ec836f85666110634ff5565f4016de1d80..a4d0a99de04da76d62bc0ceebae88fd5283473e4 100644 (file)
@@ -806,6 +806,41 @@ out-guess your code.  More generally, although READ_ONCE() does force
 the compiler to actually emit code for a given load, it does not force
 the compiler to use the results.
 
+In addition, control dependencies apply only to the then-clause and
+else-clause of the if-statement in question.  In particular, they do
+not necessarily apply to code following the if-statement:
+
+       q = READ_ONCE(a);
+       if (q) {
+               WRITE_ONCE(b, p);
+       } else {
+               WRITE_ONCE(b, r);
+       }
+       WRITE_ONCE(c, 1);  /* BUG: No ordering against the read from "a". */
+
+It is tempting to argue that there in fact is ordering because the
+compiler cannot reorder volatile accesses and also cannot reorder
+the writes to "b" with the condition.  Unfortunately for this line
+of reasoning, the compiler might compile the two writes to "b" as
+conditional-move instructions, as in this fanciful pseudo-assembly
+language:
+
+       ld r1,a
+       ld r2,p
+       ld r3,r
+       cmp r1,$0
+       cmov,ne r4,r2
+       cmov,eq r4,r3
+       st r4,b
+       st $1,c
+
+A weakly ordered CPU would have no dependency of any sort between the load
+from "a" and the store to "c".  The control dependencies would extend
+only to the pair of cmov instructions and the store depending on them.
+In short, control dependencies apply only to the stores in the then-clause
+and else-clause of the if-statement in question (including functions
+invoked by those two clauses), not to code following that if-statement.
+
 Finally, control dependencies do -not- provide transitivity.  This is
 demonstrated by two related examples, with the initial values of
 x and y both being zero:
@@ -869,6 +904,12 @@ In summary:
       atomic{,64}_read() can help to preserve your control dependency.
       Please see the COMPILER BARRIER section for more information.
 
+  (*) Control dependencies apply only to the then-clause and else-clause
+      of the if-statement containing the control dependency, including
+      any functions that these two clauses call.  Control dependencies
+      do -not- apply to code following the if-statement containing the
+      control dependency.
+
   (*) Control dependencies pair normally with other types of barriers.
 
   (*) Control dependencies do -not- provide transitivity.  If you
index a3683ce2a2f3ca99f32b3c2b120d3bd5d2d81683..33204604de6c678899af012d6232afec7ee6d753 100644 (file)
@@ -58,6 +58,7 @@ show up in /proc/sys/kernel:
 - panic_on_stackoverflow
 - panic_on_unrecovered_nmi
 - panic_on_warn
+- panic_on_rcu_stall
 - perf_cpu_time_max_percent
 - perf_event_paranoid
 - perf_event_max_stack
@@ -618,6 +619,17 @@ a kernel rebuild when attempting to kdump at the location of a WARN().
 
 ==============================================================
 
+panic_on_rcu_stall:
+
+When set to 1, calls panic() after RCU stall detection messages. This
+is useful to define the root cause of RCU stalls using a vmcore.
+
+0: do not panic() when RCU stall takes place, default behavior.
+
+1: panic() after printing RCU stall messages.
+
+==============================================================
+
 perf_cpu_time_max_percent:
 
 Hints to the kernel how much CPU time it should be allowed to
index 1a5a12184a358dc395874447ccc3c502d67f567b..85d0549ad84636652d1867d7d0a58c7ed1cc3c19 100644 (file)
@@ -45,7 +45,7 @@ is how we expect the compiler, application and kernel to work together.
    MPX-instrumented.
 3) The kernel detects that the CPU has MPX, allows the new prctl() to
    succeed, and notes the location of the bounds directory. Userspace is
-   expected to keep the bounds directory at that locationWe note it
+   expected to keep the bounds directory at that location. We note it
    instead of reading it each time because the 'xsave' operation needed
    to access the bounds directory register is an expensive operation.
 4) If the application needs to spill bounds out of the 4 registers, it
@@ -167,7 +167,7 @@ If a #BR is generated due to a bounds violation caused by MPX.
 We need to decode MPX instructions to get violation address and
 set this address into extended struct siginfo.
 
-The _sigfault feild of struct siginfo is extended as follow:
+The _sigfault field of struct siginfo is extended as follow:
 
 87             /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
 88             struct {
@@ -240,5 +240,5 @@ them at the same bounds table.
 This is allowed architecturally.  See more information "Intel(R) Architecture
 Instruction Set Extensions Programming Reference" (9.3.4).
 
-However, if users did this, the kernel might be fooled in to unmaping an
+However, if users did this, the kernel might be fooled in to unmapping an
 in-use bounds table since it does not recognize sharing.
index 39d1723267036a5ba7dff02dc82e39d69a1eeae8..6a0607b99ed8780c45920676e9b122efefa77235 100644 (file)
@@ -5,7 +5,7 @@ memory, it has two choices:
     from areas other than the one we are trying to flush will be
     destroyed and must be refilled later, at some cost.
  2. Use the invlpg instruction to invalidate a single page at a
-    time.  This could potentialy cost many more instructions, but
+    time.  This could potentially cost many more instructions, but
     it is a much more precise operation, causing no collateral
     damage to other TLB entries.
 
@@ -19,7 +19,7 @@ Which method to do depends on a few things:
     work.
  3. The size of the TLB.  The larger the TLB, the more collateral
     damage we do with a full flush.  So, the larger the TLB, the
-    more attrative an individual flush looks.  Data and
+    more attractive an individual flush looks.  Data and
     instructions have separate TLBs, as do different page sizes.
  4. The microarchitecture.  The TLB has become a multi-level
     cache on modern CPUs, and the global flushes have become more
index b1fb30273286c7ae6878f6b43fecb6df8f8f1603..d0648a74fceb50659d2eb018d47178619636b74e 100644 (file)
@@ -36,7 +36,7 @@ between all CPUs.
 
 check_interval
        How often to poll for corrected machine check errors, in seconds
-       (Note output is hexademical). Default 5 minutes.  When the poller
+       (Note output is hexadecimal). Default 5 minutes.  When the poller
        finds MCEs it triggers an exponential speedup (poll more often) on
        the polling interval.  When the poller stops finding MCEs, it
        triggers an exponential backoff (poll less often) on the polling
index 5aa738346062887731679d1376747f2e99db93e9..8c7dd5957ae15fd472e57478b2d203f6a1696682 100644 (file)
@@ -39,4 +39,8 @@ memory window (this size is arbitrary, it can be raised later if needed).
 The mappings are not part of any other kernel PGD and are only available
 during EFI runtime calls.
 
+Note that if CONFIG_RANDOMIZE_MEMORY is enabled, the direct mapping of all
+physical memory, vmalloc/ioremap space and virtual memory map are randomized.
+Their order is preserved but their base will be offset early at boot time.
+
 -Andi Kleen, Jul 2004
index 6ec72cff87b10114dcc431b7f3fe811ab98b8098..92a3f42449df26aa113f1e32fad2302aea9be929 100644 (file)
@@ -1669,7 +1669,6 @@ F:        arch/arm/boot/dts/sh*
 F:     arch/arm/configs/shmobile_defconfig
 F:     arch/arm/include/debug/renesas-scif.S
 F:     arch/arm/mach-shmobile/
-F:     drivers/sh/
 F:     drivers/soc/renesas/
 F:     include/linux/soc/renesas/
 
@@ -1694,8 +1693,6 @@ S:        Maintained
 F:     drivers/edac/altera_edac.
 
 ARM/STI ARCHITECTURE
-M:     Srinivas Kandagatla <srinivas.kandagatla@gmail.com>
-M:     Maxime Coquelin <maxime.coquelin@st.com>
 M:     Patrice Chotard <patrice.chotard@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kernel@stlinux.com
@@ -1728,6 +1725,7 @@ F:        drivers/ata/ahci_st.c
 
 ARM/STM32 ARCHITECTURE
 M:     Maxime Coquelin <mcoquelin.stm32@gmail.com>
+M:     Alexandre Torgue <alexandre.torgue@st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mcoquelin/stm32.git
@@ -4477,7 +4475,7 @@ S:        Orphan
 F:     fs/efs/
 
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M:     Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
+M:     Douglas Miller <dougmill@linux.vnet.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/ibm/ehea/
@@ -5789,7 +5787,9 @@ R:        Hartmut Knaack <knaack.h@gmx.de>
 R:     Lars-Peter Clausen <lars@metafoo.de>
 R:     Peter Meerwald-Stadler <pmeerw@pmeerw.net>
 L:     linux-iio@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jic23/iio.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/iio/
 F:     drivers/iio/
 F:     drivers/staging/iio/
 F:     include/linux/iio/
@@ -6970,7 +6970,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git
 LINUX KERNEL DUMP TEST MODULE (LKDTM)
 M:     Kees Cook <keescook@chromium.org>
 S:     Maintained
-F:     drivers/misc/lkdtm.c
+F:     drivers/misc/lkdtm*
 
 LLC (802.2)
 M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
@@ -7023,15 +7023,23 @@ Q:      http://patchwork.linuxtv.org/project/linux-media/list/
 S:     Maintained
 F:     drivers/media/usb/dvb-usb-v2/lmedm04*
 
-LOCKDEP AND LOCKSTAT
+LOCKING PRIMITIVES
 M:     Peter Zijlstra <peterz@infradead.org>
 M:     Ingo Molnar <mingo@redhat.com>
 L:     linux-kernel@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
 S:     Maintained
-F:     Documentation/locking/lockdep*.txt
-F:     Documentation/locking/lockstat.txt
+F:     Documentation/locking/
 F:     include/linux/lockdep.h
+F:     include/linux/spinlock*.h
+F:     arch/*/include/asm/spinlock*.h
+F:     include/linux/rwlock*.h
+F:     include/linux/mutex*.h
+F:     arch/*/include/asm/mutex*.h
+F:     include/linux/rwsem*.h
+F:     arch/*/include/asm/rwsem.h
+F:     include/linux/seqlock.h
+F:     lib/locking*.[ch]
 F:     kernel/locking/
 
 LOGICAL DISK MANAGER SUPPORT (LDM, Windows 2000/XP/Vista Dynamic Disks)
@@ -7475,6 +7483,7 @@ Q:        http://patchwork.ozlabs.org/project/linux-mtd/list/
 T:     git git://git.infradead.org/linux-mtd.git
 T:     git git://git.infradead.org/l2-mtd.git
 S:     Maintained
+F:     Documentation/devicetree/bindings/mtd/
 F:     drivers/mtd/
 F:     include/linux/mtd/
 F:     include/uapi/mtd/
@@ -10000,6 +10009,7 @@ SERIAL DRIVERS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-serial@vger.kernel.org
 S:     Maintained
+F:     Documentation/devicetree/bindings/serial/
 F:     drivers/tty/serial/
 
 SYNOPSYS DESIGNWARE DMAC DRIVER
@@ -10860,6 +10870,7 @@ STAGING - INDUSTRIAL IO
 M:     Jonathan Cameron <jic23@kernel.org>
 L:     linux-iio@vger.kernel.org
 S:     Odd Fixes
+F:     Documentation/devicetree/bindings/staging/iio/
 F:     drivers/staging/iio/
 
 STAGING - LIRC (LINUX INFRARED REMOTE CONTROL) DRIVERS
index 0d504893df6e954b46d91007ebe0378d11820b5d..e1a5605b01fc5bcc507fb0e8398c42fc5a97f3ea 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
@@ -1040,7 +1040,7 @@ ifdef CONFIG_STACK_VALIDATION
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev or elfutils-libelf-devel")
+    $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
index 572b228c44c7a80aec6eed925715c8d2ebacb00d..498933a7df97e9831ed3957349777c817f64a2b1 100644 (file)
@@ -46,10 +46,9 @@ static __inline__ void atomic_##op(int i, atomic_t * v)                      \
 }                                                                      \
 
 #define ATOMIC_OP_RETURN(op, asm_op)                                   \
-static inline int atomic_##op##_return(int i, atomic_t *v)             \
+static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)     \
 {                                                                      \
        long temp, result;                                              \
-       smp_mb();                                                       \
        __asm__ __volatile__(                                           \
        "1:     ldl_l %0,%1\n"                                          \
        "       " #asm_op " %0,%3,%2\n"                                 \
@@ -61,7 +60,23 @@ static inline int atomic_##op##_return(int i, atomic_t *v)           \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
-       smp_mb();                                                       \
+       return result;                                                  \
+}
+
+#define ATOMIC_FETCH_OP(op, asm_op)                                    \
+static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)      \
+{                                                                      \
+       long temp, result;                                              \
+       __asm__ __volatile__(                                           \
+       "1:     ldl_l %2,%1\n"                                          \
+       "       " #asm_op " %2,%3,%0\n"                                 \
+       "       stl_c %0,%1\n"                                          \
+       "       beq %0,2f\n"                                            \
+       ".subsection 2\n"                                               \
+       "2:     br 1b\n"                                                \
+       ".previous"                                                     \
+       :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
+       :"Ir" (i), "m" (v->counter) : "memory");                        \
        return result;                                                  \
 }
 
@@ -82,10 +97,9 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)         \
 }                                                                      \
 
 #define ATOMIC64_OP_RETURN(op, asm_op)                                 \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)  \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v)  \
 {                                                                      \
        long temp, result;                                              \
-       smp_mb();                                                       \
        __asm__ __volatile__(                                           \
        "1:     ldq_l %0,%1\n"                                          \
        "       " #asm_op " %0,%3,%2\n"                                 \
@@ -97,34 +111,77 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)      \
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
-       smp_mb();                                                       \
+       return result;                                                  \
+}
+
+#define ATOMIC64_FETCH_OP(op, asm_op)                                  \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)   \
+{                                                                      \
+       long temp, result;                                              \
+       __asm__ __volatile__(                                           \
+       "1:     ldq_l %2,%1\n"                                          \
+       "       " #asm_op " %2,%3,%0\n"                                 \
+       "       stq_c %0,%1\n"                                          \
+       "       beq %0,2f\n"                                            \
+       ".subsection 2\n"                                               \
+       "2:     br 1b\n"                                                \
+       ".previous"                                                     \
+       :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
+       :"Ir" (i), "m" (v->counter) : "memory");                        \
        return result;                                                  \
 }
 
 #define ATOMIC_OPS(op)                                                 \
        ATOMIC_OP(op, op##l)                                            \
        ATOMIC_OP_RETURN(op, op##l)                                     \
+       ATOMIC_FETCH_OP(op, op##l)                                      \
        ATOMIC64_OP(op, op##q)                                          \
-       ATOMIC64_OP_RETURN(op, op##q)
+       ATOMIC64_OP_RETURN(op, op##q)                                   \
+       ATOMIC64_FETCH_OP(op, op##q)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
+#define atomic_add_return_relaxed      atomic_add_return_relaxed
+#define atomic_sub_return_relaxed      atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed       atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed       atomic_fetch_sub_relaxed
+
+#define atomic64_add_return_relaxed    atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed    atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed     atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed     atomic64_fetch_sub_relaxed
+
 #define atomic_andnot atomic_andnot
 #define atomic64_andnot atomic64_andnot
 
-ATOMIC_OP(and, and)
-ATOMIC_OP(andnot, bic)
-ATOMIC_OP(or, bis)
-ATOMIC_OP(xor, xor)
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(andnot, bic)
-ATOMIC64_OP(or, bis)
-ATOMIC64_OP(xor, xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm)                                            \
+       ATOMIC_OP(op, asm)                                              \
+       ATOMIC_FETCH_OP(op, asm)                                        \
+       ATOMIC64_OP(op, asm)                                            \
+       ATOMIC64_FETCH_OP(op, asm)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, bis)
+ATOMIC_OPS(xor, xor)
+
+#define atomic_fetch_and_relaxed       atomic_fetch_and_relaxed
+#define atomic_fetch_andnot_relaxed    atomic_fetch_andnot_relaxed
+#define atomic_fetch_or_relaxed                atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed       atomic_fetch_xor_relaxed
+
+#define atomic64_fetch_and_relaxed     atomic64_fetch_and_relaxed
+#define atomic64_fetch_andnot_relaxed  atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_or_relaxed      atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed     atomic64_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index 0131a7058778ed67fd32da6595052a0287b742fa..77873d0ad2937b130ab0a8fa737b5a61b821d087 100644 (file)
@@ -25,8 +25,8 @@ static inline void __down_read(struct rw_semaphore *sem)
 {
        long oldcount;
 #ifndef        CONFIG_SMP
-       oldcount = sem->count;
-       sem->count += RWSEM_ACTIVE_READ_BIAS;
+       oldcount = sem->count.counter;
+       sem->count.counter += RWSEM_ACTIVE_READ_BIAS;
 #else
        long temp;
        __asm__ __volatile__(
@@ -52,13 +52,13 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
        long old, new, res;
 
-       res = sem->count;
+       res = atomic_long_read(&sem->count);
        do {
                new = res + RWSEM_ACTIVE_READ_BIAS;
                if (new <= 0)
                        break;
                old = res;
-               res = cmpxchg(&sem->count, old, new);
+               res = atomic_long_cmpxchg(&sem->count, old, new);
        } while (res != old);
        return res >= 0 ? 1 : 0;
 }
@@ -67,8 +67,8 @@ static inline long ___down_write(struct rw_semaphore *sem)
 {
        long oldcount;
 #ifndef        CONFIG_SMP
-       oldcount = sem->count;
-       sem->count += RWSEM_ACTIVE_WRITE_BIAS;
+       oldcount = sem->count.counter;
+       sem->count.counter += RWSEM_ACTIVE_WRITE_BIAS;
 #else
        long temp;
        __asm__ __volatile__(
@@ -106,7 +106,7 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-       long ret = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
+       long ret = atomic_long_cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
                           RWSEM_ACTIVE_WRITE_BIAS);
        if (ret == RWSEM_UNLOCKED_VALUE)
                return 1;
@@ -117,8 +117,8 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
        long oldcount;
 #ifndef        CONFIG_SMP
-       oldcount = sem->count;
-       sem->count -= RWSEM_ACTIVE_READ_BIAS;
+       oldcount = sem->count.counter;
+       sem->count.counter -= RWSEM_ACTIVE_READ_BIAS;
 #else
        long temp;
        __asm__ __volatile__(
@@ -142,8 +142,8 @@ static inline void __up_write(struct rw_semaphore *sem)
 {
        long count;
 #ifndef        CONFIG_SMP
-       sem->count -= RWSEM_ACTIVE_WRITE_BIAS;
-       count = sem->count;
+       sem->count.counter -= RWSEM_ACTIVE_WRITE_BIAS;
+       count = sem->count.counter;
 #else
        long temp;
        __asm__ __volatile__(
@@ -171,8 +171,8 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 {
        long oldcount;
 #ifndef        CONFIG_SMP
-       oldcount = sem->count;
-       sem->count -= RWSEM_WAITING_BIAS;
+       oldcount = sem->count.counter;
+       sem->count.counter -= RWSEM_WAITING_BIAS;
 #else
        long temp;
        __asm__ __volatile__(
@@ -191,47 +191,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                rwsem_downgrade_wake(sem);
 }
 
-static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
-{
-#ifndef        CONFIG_SMP
-       sem->count += val;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "1:     ldq_l   %0,%1\n"
-       "       addq    %0,%2,%0\n"
-       "       stq_c   %0,%1\n"
-       "       beq     %0,2f\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (temp), "=m" (sem->count)
-       :"Ir" (val), "m" (sem->count));
-#endif
-}
-
-static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
-{
-#ifndef        CONFIG_SMP
-       sem->count += val;
-       return sem->count;
-#else
-       long ret, temp;
-       __asm__ __volatile__(
-       "1:     ldq_l   %0,%1\n"
-       "       addq    %0,%3,%2\n"
-       "       addq    %0,%3,%0\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (ret), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (val), "m" (sem->count));
-
-       return ret;
-#endif
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ALPHA_RWSEM_H */
index fed9c6f44c191295d36f4ab8a9a9352883a1c5b1..a40b9fc0c6c3cafbffbf61fdde57c47afb68250d 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <linux/kernel.h>
 #include <asm/current.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 #define arch_spin_is_locked(x) ((x)->lock != 0)
-#define arch_spin_unlock_wait(x) \
-               do { cpu_relax(); } while ((x)->lock)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
index dd683995bc9d119136807adcbf71b189714fe359..4e3c1b6b0806bf1087b36f21cf96e5dfb60715b1 100644 (file)
@@ -67,6 +67,33 @@ static inline int atomic_##op##_return(int i, atomic_t *v)           \
        return val;                                                     \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       unsigned int val, orig;                                         \
+                                                                       \
+       /*                                                              \
+        * Explicit full memory barrier needed before/after as          \
+        * LLOCK/SCOND thmeselves don't provide any such semantics      \
+        */                                                             \
+       smp_mb();                                                       \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "1:     llock   %[orig], [%[ctr]]               \n"             \
+       "       " #asm_op " %[val], %[orig], %[i]       \n"             \
+       "       scond   %[val], [%[ctr]]                \n"             \
+       "                                               \n"             \
+       : [val] "=&r"   (val),                                          \
+         [orig] "=&r" (orig)                                           \
+       : [ctr] "r"     (&v->counter),                                  \
+         [i]   "ir"    (i)                                             \
+       : "cc");                                                        \
+                                                                       \
+       smp_mb();                                                       \
+                                                                       \
+       return orig;                                                    \
+}
+
 #else  /* !CONFIG_ARC_HAS_LLSC */
 
 #ifndef CONFIG_SMP
@@ -129,25 +156,44 @@ static inline int atomic_##op##_return(int i, atomic_t *v)                \
        return temp;                                                    \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       unsigned long flags;                                            \
+       unsigned long orig;                                             \
+                                                                       \
+       /*                                                              \
+        * spin lock/unlock provides the needed smp_mb() before/after   \
+        */                                                             \
+       atomic_ops_lock(flags);                                         \
+       orig = v->counter;                                              \
+       v->counter c_op i;                                              \
+       atomic_ops_unlock(flags);                                       \
+                                                                       \
+       return orig;                                                    \
+}
+
 #endif /* !CONFIG_ARC_HAS_LLSC */
 
 #define ATOMIC_OPS(op, c_op, asm_op)                                   \
        ATOMIC_OP(op, c_op, asm_op)                                     \
-       ATOMIC_OP_RETURN(op, c_op, asm_op)
+       ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
+       ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
 #define atomic_andnot atomic_andnot
 
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(andnot, &= ~, bic)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)                                   \
+       ATOMIC_OP(op, c_op, asm_op)                                     \
+       ATOMIC_FETCH_OP(op, c_op, asm_op)
 
-#undef SCOND_FAIL_RETRY_VAR_DEF
-#undef SCOND_FAIL_RETRY_ASM
-#undef SCOND_FAIL_RETRY_VARS
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
 
 #else /* CONFIG_ARC_PLAT_EZNPS */
 
@@ -208,22 +254,51 @@ static inline int atomic_##op##_return(int i, atomic_t *v)                \
        return temp;                                                    \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       unsigned int temp = i;                                          \
+                                                                       \
+       /* Explicit full memory barrier needed before/after */          \
+       smp_mb();                                                       \
+                                                                       \
+       __asm__ __volatile__(                                           \
+       "       mov r2, %0\n"                                           \
+       "       mov r3, %1\n"                                           \
+       "       .word %2\n"                                             \
+       "       mov %0, r2"                                             \
+       : "+r"(temp)                                                    \
+       : "r"(&v->counter), "i"(asm_op)                                 \
+       : "r2", "r3", "memory");                                        \
+                                                                       \
+       smp_mb();                                                       \
+                                                                       \
+       return temp;                                                    \
+}
+
 #define ATOMIC_OPS(op, c_op, asm_op)                                   \
        ATOMIC_OP(op, c_op, asm_op)                                     \
-       ATOMIC_OP_RETURN(op, c_op, asm_op)
+       ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
+       ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
 #define atomic_sub(i, v) atomic_add(-(i), (v))
 #define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
 
-ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)                                   \
+       ATOMIC_OP(op, c_op, asm_op)                                     \
+       ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
 #define atomic_andnot(mask, v) atomic_and(~(mask), (v))
-ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
-ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
+ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
+ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
 
 #endif /* CONFIG_ARC_PLAT_EZNPS */
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index cded4a9b543822d5ff9e2099fb05c67bdfc8e84a..233d5ffe6ec779eb376e5beaf5a031fce6fa97c1 100644 (file)
 
 #define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
 #define arch_spin_lock_flags(lock, flags)      arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-       do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->slock, !VAL);
+}
 
 #ifdef CONFIG_ARC_HAS_LLSC
 
index 4549ab255dd1c99ed8a24aab1a615a93846a74ab..98f22d2eb563aca35247400a3dd79dc0e55f03cb 100644 (file)
@@ -116,19 +116,19 @@ static struct clocksource arc_counter_gfrc = {
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void __init arc_cs_setup_gfrc(struct device_node *node)
+static int __init arc_cs_setup_gfrc(struct device_node *node)
 {
        int exists = cpuinfo_arc700[0].extn.gfrc;
        int ret;
 
        if (WARN(!exists, "Global-64-bit-Ctr clocksource not detected"))
-               return;
+               return -ENXIO;
 
        ret = arc_get_timer_clk(node);
        if (ret)
-               return;
+               return ret;
 
-       clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+       return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
 }
 CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
 
@@ -172,25 +172,25 @@ static struct clocksource arc_counter_rtc = {
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void __init arc_cs_setup_rtc(struct device_node *node)
+static int __init arc_cs_setup_rtc(struct device_node *node)
 {
        int exists = cpuinfo_arc700[smp_processor_id()].extn.rtc;
        int ret;
 
        if (WARN(!exists, "Local-64-bit-Ctr clocksource not detected"))
-               return;
+               return -ENXIO;
 
        /* Local to CPU hence not usable in SMP */
        if (WARN(IS_ENABLED(CONFIG_SMP), "Local-64-bit-Ctr not usable in SMP"))
-               return;
+               return -EINVAL;
 
        ret = arc_get_timer_clk(node);
        if (ret)
-               return;
+               return ret;
 
        write_aux_reg(AUX_RTC_CTRL, 1);
 
-       clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
+       return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
 }
 CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
 
@@ -213,23 +213,23 @@ static struct clocksource arc_counter_timer1 = {
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-static void __init arc_cs_setup_timer1(struct device_node *node)
+static int __init arc_cs_setup_timer1(struct device_node *node)
 {
        int ret;
 
        /* Local to CPU hence not usable in SMP */
        if (IS_ENABLED(CONFIG_SMP))
-               return;
+               return -EINVAL;
 
        ret = arc_get_timer_clk(node);
        if (ret)
-               return;
+               return ret;
 
        write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
        write_aux_reg(ARC_REG_TIMER1_CNT, 0);
        write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
 
-       clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+       return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
 }
 
 /********** Clock Event Device *********/
@@ -324,20 +324,28 @@ static struct notifier_block arc_timer_cpu_nb = {
 /*
  * clockevent setup for boot CPU
  */
-static void __init arc_clockevent_setup(struct device_node *node)
+static int __init arc_clockevent_setup(struct device_node *node)
 {
        struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
        int ret;
 
-       register_cpu_notifier(&arc_timer_cpu_nb);
+       ret = register_cpu_notifier(&arc_timer_cpu_nb);
+       if (ret) {
+               pr_err("Failed to register cpu notifier");
+               return ret;
+       }
 
        arc_timer_irq = irq_of_parse_and_map(node, 0);
-       if (arc_timer_irq <= 0)
-               panic("clockevent: missing irq");
+       if (arc_timer_irq <= 0) {
+               pr_err("clockevent: missing irq");
+               return -EINVAL;
+       }
 
        ret = arc_get_timer_clk(node);
-       if (ret)
-               panic("clockevent: missing clk");
+       if (ret) {
+               pr_err("clockevent: missing clk");
+               return ret;
+       }
 
        evt->irq = arc_timer_irq;
        evt->cpumask = cpumask_of(smp_processor_id());
@@ -347,22 +355,29 @@ static void __init arc_clockevent_setup(struct device_node *node)
        /* Needs apriori irq_set_percpu_devid() done in intc map function */
        ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
                                 "Timer0 (per-cpu-tick)", evt);
-       if (ret)
-               panic("clockevent: unable to request irq\n");
+       if (ret) {
+               pr_err("clockevent: unable to request irq\n");
+               return ret;
+       }
 
        enable_percpu_irq(arc_timer_irq, 0);
+
+       return 0;
 }
 
-static void __init arc_of_timer_init(struct device_node *np)
+static int __init arc_of_timer_init(struct device_node *np)
 {
        static int init_count = 0;
+       int ret;
 
        if (!init_count) {
                init_count = 1;
-               arc_clockevent_setup(np);
+               ret = arc_clockevent_setup(np);
        } else {
-               arc_cs_setup_timer1(np);
+               ret = arc_cs_setup_timer1(np);
        }
+
+       return ret;
 }
 CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
 
index 90542db1220dbcff5bc7f0d27ce233389a6d780d..f0636ec949032276be07527f7f7178d068b8f6c4 100644 (file)
@@ -358,10 +358,10 @@ config ARCH_CLPS711X
        bool "Cirrus Logic CLPS711x/EP721x/EP731x-based"
        select ARCH_REQUIRE_GPIOLIB
        select AUTO_ZRELADDR
-       select CLKSRC_MMIO
        select COMMON_CLK
        select CPU_ARM720T
        select GENERIC_CLOCKEVENTS
+       select CLPS711X_TIMER
        select MFD_SYSCON
        select SOC_BUS
        help
index 8450944b28e6bb7728ccd731dcb709fa7346aa28..22f7a13e20b40e094611a10953180fd1679636cd 100644 (file)
@@ -58,8 +58,8 @@
        soc {
                ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
                          MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
-                         MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
-                         MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
+                         MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+                         MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
 
                internal-regs {
 
index 2827e7ab5ebcd9a379f0afa390f255b2a986833d..5dd2734e67bad2328d9a9184b1c58b3a02df3ae3 100644 (file)
                };
 
                usb1: ohci@00400000 {
-                       compatible = "atmel,at91rm9200-ohci", "usb-ohci";
+                       compatible = "atmel,sama5d2-ohci", "usb-ohci";
                        reg = <0x00400000 0x100000>;
                        interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
                        clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
index a03e56fb5dbc7882abb90fc71298bac1c3e8a56b..ca58eb279d5541631bcc3dea5e56ef87797071ea 100644 (file)
@@ -65,8 +65,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>, <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>,
+                                <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -74,8 +75,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>, <&ahb_gates 46>,
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>,
+                                <&ahb_gates 46>,
                                 <&dram_gates 25>, <&dram_gates 26>;
                        status = "disabled";
                };
@@ -84,9 +86,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_fe0-de_be0-lcd0";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
-                                <&ahb_gates 46>, <&dram_gates 25>,
-                                <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 44>, <&ahb_gates 46>,
+                                <&dram_gates 25>, <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -94,8 +96,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_fe0-de_be0-lcd0-tve0";
-                       clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
-                                <&ahb_gates 44>, <&ahb_gates 46>,
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+                                <&ahb_gates 36>, <&ahb_gates 44>,
+                                <&ahb_gates 46>,
                                 <&dram_gates 5>, <&dram_gates 25>, <&dram_gates 26>;
                        status = "disabled";
                };
index bddd0de88af6be1d3e68b027b644a56e5e0ee61b..367f3301249364e7ad47e4c2e594ebfe6ecd88fc 100644 (file)
@@ -65,8 +65,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>;
                        status = "disabled";
                };
 
@@ -74,7 +74,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 44>;
                        status = "disabled";
                };
 
@@ -82,8 +83,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-tve0";
-                       clocks = <&pll5 1>, <&ahb_gates 34>, <&ahb_gates 36>,
-                                <&ahb_gates 44>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 34>,
+                                <&ahb_gates 36>, <&ahb_gates 44>;
                        status = "disabled";
                };
        };
index a8d8b4582397a2dee85929aa43d955efce5009f2..f694482bdeb64ccf1f21ffaa69d3fb3f1027ea0b 100644 (file)
@@ -52,7 +52,7 @@
 
 / {
        model = "NextThing C.H.I.P.";
-       compatible = "nextthing,chip", "allwinner,sun5i-r8";
+       compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";
 
        aliases {
                i2c0 = &i2c0;
index febdf4c72fb013d3a2c222a20d4f30cf149d2513..2c34bbbb95700a4c1af75ffb44d92a312b927dea 100644 (file)
@@ -67,8 +67,9 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
-                                <&ahb_gates 44>, <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 43>, <&ahb_gates 44>,
+                                <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -76,8 +77,8 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0";
-                       clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 44>,
-                                <&dram_gates 26>;
+                       clocks = <&pll3>, <&pll5 1>, <&ahb_gates 36>,
+                                <&ahb_gates 44>, <&dram_gates 26>;
                        status = "disabled";
                };
 
@@ -85,7 +86,7 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-tve0";
-                       clocks = <&pll5 1>,
+                       clocks = <&pll3>, <&pll5 1>,
                                 <&ahb_gates 34>, <&ahb_gates 36>, <&ahb_gates 44>,
                                 <&dram_gates 5>, <&dram_gates 26>;
                        status = "disabled";
                pll3x2: pll3x2_clk {
                        #clock-cells = <0>;
                        compatible = "fixed-factor-clock";
+                       clocks = <&pll3>;
                        clock-div = <1>;
                        clock-mult = <2>;
                        clock-output-names = "pll3-2x";
                pll7x2: pll7x2_clk {
                        #clock-cells = <0>;
                        compatible = "fixed-factor-clock";
+                       clocks = <&pll7>;
                        clock-div = <1>;
                        clock-mult = <2>;
                        clock-output-names = "pll7-2x";
index 1eca3b28ac6480e8b1d628cfebfb2c58ebd24280..b6da15d823a61bcbc8d77e6c7d295306795cef09 100644 (file)
 
                                ldo5_reg: ldo5 {
                                        regulator-name = "vddio_sdmmc,avdd_vdac";
-                                       regulator-min-microvolt = <3300000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <3300000>;
                                        regulator-always-on;
                                };
 
        sdhci@78000000 {
                status = "okay";
+               vqmmc-supply = <&ldo5_reg>;
                cd-gpios = <&gpio TEGRA_GPIO(I, 5) GPIO_ACTIVE_LOW>;
                wp-gpios = <&gpio TEGRA_GPIO(T, 3) GPIO_ACTIVE_HIGH>;
                power-gpios = <&gpio TEGRA_GPIO(D, 7) GPIO_ACTIVE_HIGH>;
index 9e10c4567eb4dd3c898486f5fc34777c3760adc9..66d0e215a773cb66d3baaa5a08cbc91056fe00be 100644 (file)
@@ -77,8 +77,36 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)   \
        return result;                                                  \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
+static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)      \
+{                                                                      \
+       unsigned long tmp;                                              \
+       int result, val;                                                \
+                                                                       \
+       prefetchw(&v->counter);                                         \
+                                                                       \
+       __asm__ __volatile__("@ atomic_fetch_" #op "\n"                 \
+"1:    ldrex   %0, [%4]\n"                                             \
+"      " #asm_op "     %1, %0, %5\n"                                   \
+"      strex   %2, %1, [%4]\n"                                         \
+"      teq     %2, #0\n"                                               \
+"      bne     1b"                                                     \
+       : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)  \
+       : "r" (&v->counter), "Ir" (i)                                   \
+       : "cc");                                                        \
+                                                                       \
+       return result;                                                  \
+}
+
 #define atomic_add_return_relaxed      atomic_add_return_relaxed
 #define atomic_sub_return_relaxed      atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed       atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed       atomic_fetch_sub_relaxed
+
+#define atomic_fetch_and_relaxed       atomic_fetch_and_relaxed
+#define atomic_fetch_andnot_relaxed    atomic_fetch_andnot_relaxed
+#define atomic_fetch_or_relaxed                atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed       atomic_fetch_xor_relaxed
 
 static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 {
@@ -159,6 +187,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v)         \
        return val;                                                     \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       unsigned long flags;                                            \
+       int val;                                                        \
+                                                                       \
+       raw_local_irq_save(flags);                                      \
+       val = v->counter;                                               \
+       v->counter c_op i;                                              \
+       raw_local_irq_restore(flags);                                   \
+                                                                       \
+       return val;                                                     \
+}
+
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        int ret;
@@ -187,19 +229,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define ATOMIC_OPS(op, c_op, asm_op)                                   \
        ATOMIC_OP(op, c_op, asm_op)                                     \
-       ATOMIC_OP_RETURN(op, c_op, asm_op)
+       ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
+       ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
 #define atomic_andnot atomic_andnot
 
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(andnot, &= ~, bic)
-ATOMIC_OP(or,  |=, orr)
-ATOMIC_OP(xor, ^=, eor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)                                   \
+       ATOMIC_OP(op, c_op, asm_op)                                     \
+       ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(andnot, &= ~, bic)
+ATOMIC_OPS(or,  |=, orr)
+ATOMIC_OPS(xor, ^=, eor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
@@ -317,24 +366,61 @@ atomic64_##op##_return_relaxed(long long i, atomic64_t *v)                \
        return result;                                                  \
 }
 
+#define ATOMIC64_FETCH_OP(op, op1, op2)                                        \
+static inline long long                                                        \
+atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)              \
+{                                                                      \
+       long long result, val;                                          \
+       unsigned long tmp;                                              \
+                                                                       \
+       prefetchw(&v->counter);                                         \
+                                                                       \
+       __asm__ __volatile__("@ atomic64_fetch_" #op "\n"               \
+"1:    ldrexd  %0, %H0, [%4]\n"                                        \
+"      " #op1 " %Q1, %Q0, %Q5\n"                                       \
+"      " #op2 " %R1, %R0, %R5\n"                                       \
+"      strexd  %2, %1, %H1, [%4]\n"                                    \
+"      teq     %2, #0\n"                                               \
+"      bne     1b"                                                     \
+       : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)  \
+       : "r" (&v->counter), "r" (i)                                    \
+       : "cc");                                                        \
+                                                                       \
+       return result;                                                  \
+}
+
 #define ATOMIC64_OPS(op, op1, op2)                                     \
        ATOMIC64_OP(op, op1, op2)                                       \
-       ATOMIC64_OP_RETURN(op, op1, op2)
+       ATOMIC64_OP_RETURN(op, op1, op2)                                \
+       ATOMIC64_FETCH_OP(op, op1, op2)
 
 ATOMIC64_OPS(add, adds, adc)
 ATOMIC64_OPS(sub, subs, sbc)
 
 #define atomic64_add_return_relaxed    atomic64_add_return_relaxed
 #define atomic64_sub_return_relaxed    atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed     atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed     atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, op1, op2)                                     \
+       ATOMIC64_OP(op, op1, op2)                                       \
+       ATOMIC64_FETCH_OP(op, op1, op2)
 
 #define atomic64_andnot atomic64_andnot
 
-ATOMIC64_OP(and, and, and)
-ATOMIC64_OP(andnot, bic, bic)
-ATOMIC64_OP(or,  orr, orr)
-ATOMIC64_OP(xor, eor, eor)
+ATOMIC64_OPS(and, and, and)
+ATOMIC64_OPS(andnot, bic, bic)
+ATOMIC64_OPS(or,  orr, orr)
+ATOMIC64_OPS(xor, eor, eor)
+
+#define atomic64_fetch_and_relaxed     atomic64_fetch_and_relaxed
+#define atomic64_fetch_andnot_relaxed  atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_or_relaxed      atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed     atomic64_fetch_xor_relaxed
 
 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
index a708fa1f090579228363f8a5f0427db16d31b3a1..766bf9b781601f285bd648788ee53202ebcb4550 100644 (file)
@@ -28,10 +28,10 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 #define arch_efi_call_virt_setup()     efi_virtmap_load()
 #define arch_efi_call_virt_teardown()  efi_virtmap_unload()
 
-#define arch_efi_call_virt(f, args...)                                 \
+#define arch_efi_call_virt(p, f, args...)                              \
 ({                                                                     \
        efi_##f##_t *__f;                                               \
-       __f = efi.systab->runtime->f;                                   \
+       __f = p->f;                                                     \
        __f(args);                                                      \
 })
 
index 0fa418463f49e95696c6d6b7ea9ac8d3d2798647..4bec4544207243d477b2e326b50119cc842526fe 100644 (file)
@@ -6,6 +6,8 @@
 #endif
 
 #include <linux/prefetch.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
@@ -50,8 +52,21 @@ static inline void dsb_sev(void)
  * memory.
  */
 
-#define arch_spin_unlock_wait(lock) \
-       do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       u16 owner = READ_ONCE(lock->tickets.owner);
+
+       for (;;) {
+               arch_spinlock_t tmp = READ_ONCE(*lock);
+
+               if (tmp.tickets.owner == tmp.tickets.next ||
+                   tmp.tickets.owner != owner)
+                       break;
+
+               wfe();
+       }
+       smp_acquire__after_ctrl_dep();
+}
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
index df3f60cb1168aff33ed9c2df530439ccfedb1a72..a2b3eb313a25ccb2284edfc8ef2021c3e646c786 100644 (file)
@@ -139,8 +139,8 @@ struct kvm_arch_memory_slot {
 #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
 
 #define KVM_REG_ARM_TIMER_CTL          ARM_CP15_REG32(0, 14, 3, 1)
-#define KVM_REG_ARM_TIMER_CNT          ARM_CP15_REG64(1, 14) 
-#define KVM_REG_ARM_TIMER_CVAL         ARM_CP15_REG64(3, 14) 
+#define KVM_REG_ARM_TIMER_CNT          ARM_CP15_REG64(1, 14)
+#define KVM_REG_ARM_TIMER_CVAL         ARM_CP15_REG64(3, 14)
 
 /* Normal registers are mapped as coprocessor 16. */
 #define KVM_REG_ARM_CORE               (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
index 1bfa7a7f55336119bfb647eb1e419fe81c086a05..b6ec65e680091817dd8e139fe7373addc98226b2 100644 (file)
@@ -390,7 +390,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)
 }
 
 #ifdef CONFIG_OF
-static void __init twd_local_timer_of_register(struct device_node *np)
+static int __init twd_local_timer_of_register(struct device_node *np)
 {
        int err;
 
@@ -410,6 +410,7 @@ static void __init twd_local_timer_of_register(struct device_node *np)
 
 out:
        WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
+       return err;
 }
 CLOCKSOURCE_OF_DECLARE(arm_twd_a9, "arm,cortex-a9-twd-timer", twd_local_timer_of_register);
 CLOCKSOURCE_OF_DECLARE(arm_twd_a5, "arm,cortex-a5-twd-timer", twd_local_timer_of_register);
index 68ab6412392a44b84f4519ff04d725a3505f9bcf..4f1709b318226aa9c65c920106ac310d055e30f7 100644 (file)
@@ -89,6 +89,7 @@ config ARCH_BCM_MOBILE
        select HAVE_ARM_ARCH_TIMER
        select PINCTRL
        select ARCH_BCM_MOBILE_SMP if SMP
+       select BCM_KONA_TIMER
        help
          This enables support for systems based on Broadcom mobile SoCs.
 
@@ -143,6 +144,7 @@ config ARCH_BCM2835
        select ARM_TIMER_SP804
        select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
        select CLKSRC_OF
+       select BCM2835_TIMER
        select PINCTRL
        select PINCTRL_BCM2835
        help
index b2a85ba13f088fb451377fa09d800df34e926846..291262e5aeaf3a14682ff3cb0ae914fd76e7615f 100644 (file)
@@ -20,7 +20,7 @@ if ARCH_INTEGRATOR
 
 config ARCH_INTEGRATOR_AP
        bool "Support Integrator/AP and Integrator/PP2 platforms"
-       select CLKSRC_MMIO
+       select INTEGRATOR_AP_TIMER
        select MIGHT_HAVE_PCI
        select SERIAL_AMBA_PL010 if TTY
        select SERIAL_AMBA_PL010_CONSOLE if TTY
index ea955f6db8b7edd77324879de7b3551aac118ea5..bac577badc7e90b5511e7bfba874809d12f2fa98 100644 (file)
@@ -4,7 +4,7 @@ config ARCH_KEYSTONE
        depends on ARM_PATCH_PHYS_VIRT
        select ARM_GIC
        select HAVE_ARM_ARCH_TIMER
-       select CLKSRC_MMIO
+       select KEYSTONE_TIMER
        select ARM_ERRATA_798181 if SMP
        select COMMON_CLK_KEYSTONE
        select ARCH_SUPPORTS_BIG_ENDIAN
index 180d9d2167195936ccb2062b1e8035a8adefef78..ddc79cea32d3ee6798b9c0cd228a5995a8c268e4 100644 (file)
@@ -3,7 +3,7 @@ menuconfig ARCH_MOXART
        depends on ARCH_MULTI_V4
        select CPU_FA526
        select ARM_DMA_MEM_BUFFERABLE
-       select CLKSRC_MMIO
+       select MOXART_TIMER
        select GENERIC_IRQ_CHIP
        select ARCH_REQUIRE_GPIOLIB
        select PHYLIB if NETDEVICES
index ecf9e0c3b107808752fe49af062b8758e1784986..e53c6cfcab51cd12c798fd11d663686c77761b02 100644 (file)
@@ -7,9 +7,15 @@ CFLAGS_pmsu.o                  := -march=armv7-a
 obj-$(CONFIG_MACH_MVEBU_ANY)    += system-controller.o mvebu-soc-id.o
 
 ifeq ($(CONFIG_MACH_MVEBU_V7),y)
-obj-y                           += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o pm.o pm-board.o
+obj-y                           += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
+
+obj-$(CONFIG_PM)                += pm.o pm-board.o
 obj-$(CONFIG_SMP)               += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
 endif
 
 obj-$(CONFIG_MACH_DOVE)                 += dove.o
-obj-$(CONFIG_MACH_KIRKWOOD)     += kirkwood.o kirkwood-pm.o
+
+ifeq ($(CONFIG_MACH_KIRKWOOD),y)
+obj-y                           += kirkwood.o
+obj-$(CONFIG_PM)                += kirkwood-pm.o
+endif
index 7e989d61159c384df1de62b27156ddf22d8fa44f..e80f0dde218919dab8d7a2b5873a4962755f3ee2 100644 (file)
@@ -162,22 +162,16 @@ exit:
 }
 
 /*
- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
- * is needed as a workaround for a deadlock issue between the PCIe
- * interface and the cache controller.
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
  */
 static void __iomem *
-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
-                             unsigned int mtype, void *caller)
+armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+                        unsigned int mtype, void *caller)
 {
-       struct resource pcie_mem;
-
-       mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
-
-       if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
-               mtype = MT_UNCACHED;
-
+       mtype = MT_UNCACHED;
        return __arm_ioremap_caller(phys_addr, size, mtype, caller);
 }
 
@@ -186,7 +180,8 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
        struct device_node *cache_dn;
 
        coherency_cpu_base = of_iomap(np, 0);
-       arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
+       arch_ioremap_caller = armada_wa_ioremap_caller;
+       pci_ioremap_set_mem_type(MT_UNCACHED);
 
        /*
         * We should switch the PL310 to I/O coherency mode only if
index 84794137b17500da75487a5e394be6bea0f30495..68a3a9ec605dbf083b0ff747cf64c4bc3a5e1f0f 100644 (file)
@@ -16,7 +16,7 @@ config ARCH_MXS
        bool "Freescale MXS (i.MX23, i.MX28) support"
        depends on ARCH_MULTI_V5
        select ARCH_REQUIRE_GPIOLIB
-       select CLKSRC_MMIO
+       select MXS_TIMER
        select PINCTRL
        select SOC_BUS
        select SOC_IMX23
index bc41f26c1a1208a7beb0fe2390229a54ef01eff6..d4985305cab2c5c4838139d05571c7665eba2238 100644 (file)
@@ -7,5 +7,6 @@ config ARCH_NSPIRE
        select ARM_AMBA
        select ARM_VIC
        select ARM_TIMER_SP804
+       select NSPIRE_TIMER
        help
          This enables support for systems using the TI-NSPIRE CPU
index 0cf4426183cff99718d63975dfcb01453b45f56b..9e938f2961cfc1233e6b604132a62c1c814e3b6e 100644 (file)
@@ -28,6 +28,7 @@ config ARCH_ATLAS7
        default y
        select ARM_GIC
        select CPU_V7
+       select ATLAS7_TIMER
        select HAVE_ARM_SCU if SMP
        select HAVE_SMP
        help
@@ -38,6 +39,7 @@ config ARCH_PRIMA2
        default y
        select SIRF_IRQ
        select ZONE_DMA
+       select PRIMA2_TIMER
        help
           Support for CSR SiRFSoC ARM Cortex A9 Platform
 
index 301a98498453d40915c2292da3d844d38bcce3e5..4fdc3425ffbd2d12120d3a322b34063c1478e06e 100644 (file)
@@ -4,7 +4,7 @@ menuconfig ARCH_U300
        select ARCH_REQUIRE_GPIOLIB
        select ARM_AMBA
        select ARM_VIC
-       select CLKSRC_MMIO
+       select U300_TIMER
        select CPU_ARM926T
        select HAVE_TCM
        select PINCTRL
index d7f8e06910bcada5899399b87d3a8a926813eb53..188bbeab92b96d96f91eb9a6de2c12cd8687a310 100644 (file)
                interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
        };
 
+       rktimer: rktimer@ff850000 {
+               compatible = "rockchip,rk3399-timer";
+               reg = <0x0 0xff850000 0x0 0x1000>;
+               interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&cru PCLK_TIMER0>, <&cru SCLK_TIMER00>;
+               clock-names = "pclk", "timer";
+       };
+
        spdif: spdif@ff870000 {
                compatible = "rockchip,rk3399-spdif";
                reg = <0x0 0xff870000 0x0 0x1000>;
index f3a3586a421c8869d6e88219c686491d71e2ecd5..c0235e0ff8493fc516109d1e2e5cbaa647fd27dc 100644 (file)
 #define atomic_dec_return_release(v)   atomic_sub_return_release(1, (v))
 #define atomic_dec_return(v)           atomic_sub_return(1, (v))
 
+#define atomic_fetch_add_relaxed       atomic_fetch_add_relaxed
+#define atomic_fetch_add_acquire       atomic_fetch_add_acquire
+#define atomic_fetch_add_release       atomic_fetch_add_release
+#define atomic_fetch_add               atomic_fetch_add
+
+#define atomic_fetch_sub_relaxed       atomic_fetch_sub_relaxed
+#define atomic_fetch_sub_acquire       atomic_fetch_sub_acquire
+#define atomic_fetch_sub_release       atomic_fetch_sub_release
+#define atomic_fetch_sub               atomic_fetch_sub
+
+#define atomic_fetch_and_relaxed       atomic_fetch_and_relaxed
+#define atomic_fetch_and_acquire       atomic_fetch_and_acquire
+#define atomic_fetch_and_release       atomic_fetch_and_release
+#define atomic_fetch_and               atomic_fetch_and
+
+#define atomic_fetch_andnot_relaxed    atomic_fetch_andnot_relaxed
+#define atomic_fetch_andnot_acquire    atomic_fetch_andnot_acquire
+#define atomic_fetch_andnot_release    atomic_fetch_andnot_release
+#define atomic_fetch_andnot            atomic_fetch_andnot
+
+#define atomic_fetch_or_relaxed                atomic_fetch_or_relaxed
+#define atomic_fetch_or_acquire                atomic_fetch_or_acquire
+#define atomic_fetch_or_release                atomic_fetch_or_release
+#define atomic_fetch_or                        atomic_fetch_or
+
+#define atomic_fetch_xor_relaxed       atomic_fetch_xor_relaxed
+#define atomic_fetch_xor_acquire       atomic_fetch_xor_acquire
+#define atomic_fetch_xor_release       atomic_fetch_xor_release
+#define atomic_fetch_xor               atomic_fetch_xor
+
 #define atomic_xchg_relaxed(v, new)    xchg_relaxed(&((v)->counter), (new))
 #define atomic_xchg_acquire(v, new)    xchg_acquire(&((v)->counter), (new))
 #define atomic_xchg_release(v, new)    xchg_release(&((v)->counter), (new))
 #define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
 #define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
 
+#define atomic64_fetch_add_relaxed     atomic64_fetch_add_relaxed
+#define atomic64_fetch_add_acquire     atomic64_fetch_add_acquire
+#define atomic64_fetch_add_release     atomic64_fetch_add_release
+#define atomic64_fetch_add             atomic64_fetch_add
+
+#define atomic64_fetch_sub_relaxed     atomic64_fetch_sub_relaxed
+#define atomic64_fetch_sub_acquire     atomic64_fetch_sub_acquire
+#define atomic64_fetch_sub_release     atomic64_fetch_sub_release
+#define atomic64_fetch_sub             atomic64_fetch_sub
+
+#define atomic64_fetch_and_relaxed     atomic64_fetch_and_relaxed
+#define atomic64_fetch_and_acquire     atomic64_fetch_and_acquire
+#define atomic64_fetch_and_release     atomic64_fetch_and_release
+#define atomic64_fetch_and             atomic64_fetch_and
+
+#define atomic64_fetch_andnot_relaxed  atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_andnot_acquire  atomic64_fetch_andnot_acquire
+#define atomic64_fetch_andnot_release  atomic64_fetch_andnot_release
+#define atomic64_fetch_andnot          atomic64_fetch_andnot
+
+#define atomic64_fetch_or_relaxed      atomic64_fetch_or_relaxed
+#define atomic64_fetch_or_acquire      atomic64_fetch_or_acquire
+#define atomic64_fetch_or_release      atomic64_fetch_or_release
+#define atomic64_fetch_or              atomic64_fetch_or
+
+#define atomic64_fetch_xor_relaxed     atomic64_fetch_xor_relaxed
+#define atomic64_fetch_xor_acquire     atomic64_fetch_xor_acquire
+#define atomic64_fetch_xor_release     atomic64_fetch_xor_release
+#define atomic64_fetch_xor             atomic64_fetch_xor
+
 #define atomic64_xchg_relaxed          atomic_xchg_relaxed
 #define atomic64_xchg_acquire          atomic_xchg_acquire
 #define atomic64_xchg_release          atomic_xchg_release
index f61c84f6ba021aa9cd149c8b927680d86f6328b7..f819fdcff1accf694cc0ec7428096fe41aef7f89 100644 (file)
@@ -77,26 +77,57 @@ __LL_SC_PREFIX(atomic_##op##_return##name(int i, atomic_t *v))              \
 }                                                                      \
 __LL_SC_EXPORT(atomic_##op##_return##name);
 
+#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)            \
+__LL_SC_INLINE int                                                     \
+__LL_SC_PREFIX(atomic_fetch_##op##name(int i, atomic_t *v))            \
+{                                                                      \
+       unsigned long tmp;                                              \
+       int val, result;                                                \
+                                                                       \
+       asm volatile("// atomic_fetch_" #op #name "\n"                  \
+"      prfm    pstl1strm, %3\n"                                        \
+"1:    ld" #acq "xr    %w0, %3\n"                                      \
+"      " #asm_op "     %w1, %w0, %w4\n"                                \
+"      st" #rel "xr    %w2, %w1, %3\n"                                 \
+"      cbnz    %w2, 1b\n"                                              \
+"      " #mb                                                           \
+       : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
+       : "Ir" (i)                                                      \
+       : cl);                                                          \
+                                                                       \
+       return result;                                                  \
+}                                                                      \
+__LL_SC_EXPORT(atomic_fetch_##op##name);
+
 #define ATOMIC_OPS(...)                                                        \
        ATOMIC_OP(__VA_ARGS__)                                          \
-       ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)
-
-#define ATOMIC_OPS_RLX(...)                                            \
-       ATOMIC_OPS(__VA_ARGS__)                                         \
+       ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
        ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
-       ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)
+       ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
+       ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
+       ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
+       ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
+       ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)
 
-ATOMIC_OPS_RLX(add, add)
-ATOMIC_OPS_RLX(sub, sub)
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(...)                                                        \
+       ATOMIC_OP(__VA_ARGS__)                                          \
+       ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
+       ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
+       ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
+       ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)
 
-ATOMIC_OP(and, and)
-ATOMIC_OP(andnot, bic)
-ATOMIC_OP(or, orr)
-ATOMIC_OP(xor, eor)
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, orr)
+ATOMIC_OPS(xor, eor)
 
-#undef ATOMIC_OPS_RLX
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
@@ -140,26 +171,57 @@ __LL_SC_PREFIX(atomic64_##op##_return##name(long i, atomic64_t *v))       \
 }                                                                      \
 __LL_SC_EXPORT(atomic64_##op##_return##name);
 
+#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op)          \
+__LL_SC_INLINE long                                                    \
+__LL_SC_PREFIX(atomic64_fetch_##op##name(long i, atomic64_t *v))       \
+{                                                                      \
+       long result, val;                                               \
+       unsigned long tmp;                                              \
+                                                                       \
+       asm volatile("// atomic64_fetch_" #op #name "\n"                \
+"      prfm    pstl1strm, %3\n"                                        \
+"1:    ld" #acq "xr    %0, %3\n"                                       \
+"      " #asm_op "     %1, %0, %4\n"                                   \
+"      st" #rel "xr    %w2, %1, %3\n"                                  \
+"      cbnz    %w2, 1b\n"                                              \
+"      " #mb                                                           \
+       : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)   \
+       : "Ir" (i)                                                      \
+       : cl);                                                          \
+                                                                       \
+       return result;                                                  \
+}                                                                      \
+__LL_SC_EXPORT(atomic64_fetch_##op##name);
+
 #define ATOMIC64_OPS(...)                                              \
        ATOMIC64_OP(__VA_ARGS__)                                        \
-       ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)
-
-#define ATOMIC64_OPS_RLX(...)                                          \
-       ATOMIC64_OPS(__VA_ARGS__)                                       \
+       ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)      \
        ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)      \
-       ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)
+       ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)      \
+       ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)      \
+       ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)      \
+       ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
+       ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)
 
-ATOMIC64_OPS_RLX(add, add)
-ATOMIC64_OPS_RLX(sub, sub)
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(...)                                              \
+       ATOMIC64_OP(__VA_ARGS__)                                        \
+       ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)      \
+       ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)      \
+       ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)      \
+       ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)
 
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(andnot, bic)
-ATOMIC64_OP(or, orr)
-ATOMIC64_OP(xor, eor)
+ATOMIC64_OPS(and, and)
+ATOMIC64_OPS(andnot, bic)
+ATOMIC64_OPS(or, orr)
+ATOMIC64_OPS(xor, eor)
 
-#undef ATOMIC64_OPS_RLX
 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
index 39c1d340fec59136b8ddd6e3ac3f39354289189f..b5890be8f257e5b13bf5191f27b6f71f334004de 100644 (file)
 #endif
 
 #define __LL_SC_ATOMIC(op)     __LL_SC_CALL(atomic_##op)
-
-static inline void atomic_andnot(int i, atomic_t *v)
-{
-       register int w0 asm ("w0") = i;
-       register atomic_t *x1 asm ("x1") = v;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
-       "       stclr   %w[i], %[v]\n")
-       : [i] "+r" (w0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
+#define ATOMIC_OP(op, asm_op)                                          \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       register int w0 asm ("w0") = i;                                 \
+       register atomic_t *x1 asm ("x1") = v;                           \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op),          \
+"      " #asm_op "     %w[i], %[v]\n")                                 \
+       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : __LL_SC_CLOBBERS);                                            \
 }
 
-static inline void atomic_or(int i, atomic_t *v)
-{
-       register int w0 asm ("w0") = i;
-       register atomic_t *x1 asm ("x1") = v;
+ATOMIC_OP(andnot, stclr)
+ATOMIC_OP(or, stset)
+ATOMIC_OP(xor, steor)
+ATOMIC_OP(add, stadd)
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
-       "       stset   %w[i], %[v]\n")
-       : [i] "+r" (w0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
-}
+#undef ATOMIC_OP
 
-static inline void atomic_xor(int i, atomic_t *v)
-{
-       register int w0 asm ("w0") = i;
-       register atomic_t *x1 asm ("x1") = v;
-
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
-       "       steor   %w[i], %[v]\n")
-       : [i] "+r" (w0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
+#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)                   \
+static inline int atomic_fetch_##op##name(int i, atomic_t *v)          \
+{                                                                      \
+       register int w0 asm ("w0") = i;                                 \
+       register atomic_t *x1 asm ("x1") = v;                           \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       __LL_SC_ATOMIC(fetch_##op##name),                               \
+       /* LSE atomics */                                               \
+"      " #asm_op #mb " %w[i], %w[i], %[v]")                            \
+       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
+                                                                       \
+       return w0;                                                      \
 }
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-       register int w0 asm ("w0") = i;
-       register atomic_t *x1 asm ("x1") = v;
+#define ATOMIC_FETCH_OPS(op, asm_op)                                   \
+       ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)                       \
+       ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")             \
+       ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")             \
+       ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
-       "       stadd   %w[i], %[v]\n")
-       : [i] "+r" (w0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
-}
+ATOMIC_FETCH_OPS(andnot, ldclr)
+ATOMIC_FETCH_OPS(or, ldset)
+ATOMIC_FETCH_OPS(xor, ldeor)
+ATOMIC_FETCH_OPS(add, ldadd)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_FETCH_OPS
 
 #define ATOMIC_OP_ADD_RETURN(name, mb, cl...)                          \
 static inline int atomic_add_return##name(int i, atomic_t *v)          \
@@ -119,6 +122,33 @@ static inline void atomic_and(int i, atomic_t *v)
        : __LL_SC_CLOBBERS);
 }
 
+#define ATOMIC_FETCH_OP_AND(name, mb, cl...)                           \
+static inline int atomic_fetch_and##name(int i, atomic_t *v)           \
+{                                                                      \
+       register int w0 asm ("w0") = i;                                 \
+       register atomic_t *x1 asm ("x1") = v;                           \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       "       nop\n"                                                  \
+       __LL_SC_ATOMIC(fetch_and##name),                                \
+       /* LSE atomics */                                               \
+       "       mvn     %w[i], %w[i]\n"                                 \
+       "       ldclr" #mb "    %w[i], %w[i], %[v]")                    \
+       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
+                                                                       \
+       return w0;                                                      \
+}
+
+ATOMIC_FETCH_OP_AND(_relaxed,   )
+ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
+ATOMIC_FETCH_OP_AND(_release,  l, "memory")
+ATOMIC_FETCH_OP_AND(        , al, "memory")
+
+#undef ATOMIC_FETCH_OP_AND
+
 static inline void atomic_sub(int i, atomic_t *v)
 {
        register int w0 asm ("w0") = i;
@@ -164,57 +194,87 @@ ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
 ATOMIC_OP_SUB_RETURN(        , al, "memory")
 
 #undef ATOMIC_OP_SUB_RETURN
-#undef __LL_SC_ATOMIC
-
-#define __LL_SC_ATOMIC64(op)   __LL_SC_CALL(atomic64_##op)
-
-static inline void atomic64_andnot(long i, atomic64_t *v)
-{
-       register long x0 asm ("x0") = i;
-       register atomic64_t *x1 asm ("x1") = v;
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
-       "       stclr   %[i], %[v]\n")
-       : [i] "+r" (x0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
+#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)                           \
+static inline int atomic_fetch_sub##name(int i, atomic_t *v)           \
+{                                                                      \
+       register int w0 asm ("w0") = i;                                 \
+       register atomic_t *x1 asm ("x1") = v;                           \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       "       nop\n"                                                  \
+       __LL_SC_ATOMIC(fetch_sub##name),                                \
+       /* LSE atomics */                                               \
+       "       neg     %w[i], %w[i]\n"                                 \
+       "       ldadd" #mb "    %w[i], %w[i], %[v]")                    \
+       : [i] "+r" (w0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
+                                                                       \
+       return w0;                                                      \
 }
 
-static inline void atomic64_or(long i, atomic64_t *v)
-{
-       register long x0 asm ("x0") = i;
-       register atomic64_t *x1 asm ("x1") = v;
+ATOMIC_FETCH_OP_SUB(_relaxed,   )
+ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
+ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
+ATOMIC_FETCH_OP_SUB(        , al, "memory")
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
-       "       stset   %[i], %[v]\n")
-       : [i] "+r" (x0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
+#undef ATOMIC_FETCH_OP_SUB
+#undef __LL_SC_ATOMIC
+
+#define __LL_SC_ATOMIC64(op)   __LL_SC_CALL(atomic64_##op)
+#define ATOMIC64_OP(op, asm_op)                                                \
+static inline void atomic64_##op(long i, atomic64_t *v)                        \
+{                                                                      \
+       register long x0 asm ("x0") = i;                                \
+       register atomic64_t *x1 asm ("x1") = v;                         \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op),        \
+"      " #asm_op "     %[i], %[v]\n")                                  \
+       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : __LL_SC_CLOBBERS);                                            \
 }
 
-static inline void atomic64_xor(long i, atomic64_t *v)
-{
-       register long x0 asm ("x0") = i;
-       register atomic64_t *x1 asm ("x1") = v;
+ATOMIC64_OP(andnot, stclr)
+ATOMIC64_OP(or, stset)
+ATOMIC64_OP(xor, steor)
+ATOMIC64_OP(add, stadd)
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
-       "       steor   %[i], %[v]\n")
-       : [i] "+r" (x0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
+#undef ATOMIC64_OP
+
+#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)                 \
+static inline long atomic64_fetch_##op##name(long i, atomic64_t *v)    \
+{                                                                      \
+       register long x0 asm ("x0") = i;                                \
+       register atomic64_t *x1 asm ("x1") = v;                         \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       __LL_SC_ATOMIC64(fetch_##op##name),                             \
+       /* LSE atomics */                                               \
+"      " #asm_op #mb " %[i], %[i], %[v]")                              \
+       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
+                                                                       \
+       return x0;                                                      \
 }
 
-static inline void atomic64_add(long i, atomic64_t *v)
-{
-       register long x0 asm ("x0") = i;
-       register atomic64_t *x1 asm ("x1") = v;
+#define ATOMIC64_FETCH_OPS(op, asm_op)                                 \
+       ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)                     \
+       ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")           \
+       ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")           \
+       ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")
 
-       asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
-       "       stadd   %[i], %[v]\n")
-       : [i] "+r" (x0), [v] "+Q" (v->counter)
-       : "r" (x1)
-       : __LL_SC_CLOBBERS);
-}
+ATOMIC64_FETCH_OPS(andnot, ldclr)
+ATOMIC64_FETCH_OPS(or, ldset)
+ATOMIC64_FETCH_OPS(xor, ldeor)
+ATOMIC64_FETCH_OPS(add, ldadd)
+
+#undef ATOMIC64_FETCH_OP
+#undef ATOMIC64_FETCH_OPS
 
 #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)                                \
 static inline long atomic64_add_return##name(long i, atomic64_t *v)    \
@@ -260,6 +320,33 @@ static inline void atomic64_and(long i, atomic64_t *v)
        : __LL_SC_CLOBBERS);
 }
 
+#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)                         \
+static inline long atomic64_fetch_and##name(long i, atomic64_t *v)     \
+{                                                                      \
+       register long x0 asm ("x0") = i;                                \
+       register atomic64_t *x1 asm ("x1") = v;                         \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       "       nop\n"                                                  \
+       __LL_SC_ATOMIC64(fetch_and##name),                              \
+       /* LSE atomics */                                               \
+       "       mvn     %[i], %[i]\n"                                   \
+       "       ldclr" #mb "    %[i], %[i], %[v]")                      \
+       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
+                                                                       \
+       return x0;                                                      \
+}
+
+ATOMIC64_FETCH_OP_AND(_relaxed,   )
+ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
+ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
+ATOMIC64_FETCH_OP_AND(        , al, "memory")
+
+#undef ATOMIC64_FETCH_OP_AND
+
 static inline void atomic64_sub(long i, atomic64_t *v)
 {
        register long x0 asm ("x0") = i;
@@ -306,6 +393,33 @@ ATOMIC64_OP_SUB_RETURN(        , al, "memory")
 
 #undef ATOMIC64_OP_SUB_RETURN
 
+#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)                         \
+static inline long atomic64_fetch_sub##name(long i, atomic64_t *v)     \
+{                                                                      \
+       register long x0 asm ("x0") = i;                                \
+       register atomic64_t *x1 asm ("x1") = v;                         \
+                                                                       \
+       asm volatile(ARM64_LSE_ATOMIC_INSN(                             \
+       /* LL/SC */                                                     \
+       "       nop\n"                                                  \
+       __LL_SC_ATOMIC64(fetch_sub##name),                              \
+       /* LSE atomics */                                               \
+       "       neg     %[i], %[i]\n"                                   \
+       "       ldadd" #mb "    %[i], %[i], %[v]")                      \
+       : [i] "+r" (x0), [v] "+Q" (v->counter)                          \
+       : "r" (x1)                                                      \
+       : __LL_SC_CLOBBERS, ##cl);                                      \
+                                                                       \
+       return x0;                                                      \
+}
+
+ATOMIC64_FETCH_OP_SUB(_relaxed,   )
+ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
+ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
+ATOMIC64_FETCH_OP_SUB(        , al, "memory")
+
+#undef ATOMIC64_FETCH_OP_SUB
+
 static inline long atomic64_dec_if_positive(atomic64_t *v)
 {
        register long x0 asm ("x0") = (long)v;
index dae5c49618db38021b418379bcb9fe52529ec862..4eea7f618dcefff4be6e3f4d99c503cba5e7c779 100644 (file)
@@ -91,6 +91,19 @@ do {                                                                 \
        __u.__val;                                                      \
 })
 
+#define smp_cond_load_acquire(ptr, cond_expr)                          \
+({                                                                     \
+       typeof(ptr) __PTR = (ptr);                                      \
+       typeof(*ptr) VAL;                                               \
+       for (;;) {                                                      \
+               VAL = smp_load_acquire(__PTR);                          \
+               if (cond_expr)                                          \
+                       break;                                          \
+               __cmpwait_relaxed(__PTR, VAL);                          \
+       }                                                               \
+       VAL;                                                            \
+})
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASSEMBLY__ */
index 510c7b4044547f82750ca9295e6d35bdeb0b67bf..bd86a79491bce84d34576d261214216282265d9a 100644 (file)
@@ -224,4 +224,55 @@ __CMPXCHG_GEN(_mb)
        __ret;                                                          \
 })
 
+#define __CMPWAIT_CASE(w, sz, name)                                    \
+static inline void __cmpwait_case_##name(volatile void *ptr,           \
+                                        unsigned long val)             \
+{                                                                      \
+       unsigned long tmp;                                              \
+                                                                       \
+       asm volatile(                                                   \
+       "       ldxr" #sz "\t%" #w "[tmp], %[v]\n"              \
+       "       eor     %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"     \
+       "       cbnz    %" #w "[tmp], 1f\n"                             \
+       "       wfe\n"                                                  \
+       "1:"                                                            \
+       : [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)           \
+       : [val] "r" (val));                                             \
+}
+
+__CMPWAIT_CASE(w, b, 1);
+__CMPWAIT_CASE(w, h, 2);
+__CMPWAIT_CASE(w,  , 4);
+__CMPWAIT_CASE( ,  , 8);
+
+#undef __CMPWAIT_CASE
+
+#define __CMPWAIT_GEN(sfx)                                             \
+static inline void __cmpwait##sfx(volatile void *ptr,                  \
+                                 unsigned long val,                    \
+                                 int size)                             \
+{                                                                      \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               return __cmpwait_case##sfx##_1(ptr, (u8)val);           \
+       case 2:                                                         \
+               return __cmpwait_case##sfx##_2(ptr, (u16)val);          \
+       case 4:                                                         \
+               return __cmpwait_case##sfx##_4(ptr, val);               \
+       case 8:                                                         \
+               return __cmpwait_case##sfx##_8(ptr, val);               \
+       default:                                                        \
+               BUILD_BUG();                                            \
+       }                                                               \
+                                                                       \
+       unreachable();                                                  \
+}
+
+__CMPWAIT_GEN()
+
+#undef __CMPWAIT_GEN
+
+#define __cmpwait_relaxed(ptr, val) \
+       __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
+
 #endif /* __ASM_CMPXCHG_H */
index 87e1985f3be8c5912d1b1ca4fbc5d8d70024db50..9d9fd4b9a72e574baba29be7381e8740f06b2d66 100644 (file)
 #define APM_CPU_PART_POTENZA           0x000
 
 #define CAVIUM_CPU_PART_THUNDERX       0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX  0x0A2
 
 #define BRCM_CPU_PART_VULCAN           0x516
 
 #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
 #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
 #define MIDR_THUNDERX  MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 
 #ifndef __ASSEMBLY__
 
index 622db3c6474e2d5c51b3a1689869534cf16019ee..bd887663689bd4fdc5b6e9f1f144be4d5eb8fdc3 100644 (file)
@@ -23,10 +23,10 @@ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
        efi_virtmap_load();                                             \
 })
 
-#define arch_efi_call_virt(f, args...)                                 \
+#define arch_efi_call_virt(p, f, args...)                              \
 ({                                                                     \
        efi_##f##_t *__f;                                               \
-       __f = efi.systab->runtime->f;                                   \
+       __f = p->f;                                                     \
        __f(args);                                                      \
 })
 
index a307eb6e7fa8d75e536fe3cff56c5d4f978fe212..7f94755089e200afbd4c015316da4517fe113bcd 100644 (file)
@@ -117,6 +117,8 @@ struct pt_regs {
        };
        u64 orig_x0;
        u64 syscallno;
+       u64 orig_addr_limit;
+       u64 unused;     // maintain 16 byte alignment
 };
 
 #define arch_has_single_step() (1)
index f8e5d47f08807aa41d33c84a323e4bf1f37ffebf..2f4ba774488ad5cc72444a8d351ce63fa504351c 100644 (file)
@@ -60,6 +60,7 @@ int main(void)
   DEFINE(S_PC,                 offsetof(struct pt_regs, pc));
   DEFINE(S_ORIG_X0,            offsetof(struct pt_regs, orig_x0));
   DEFINE(S_SYSCALLNO,          offsetof(struct pt_regs, syscallno));
+  DEFINE(S_ORIG_ADDR_LIMIT,    offsetof(struct pt_regs, orig_addr_limit));
   DEFINE(S_FRAME_SIZE,         sizeof(struct pt_regs));
   BLANK();
   DEFINE(MM_CONTEXT_ID,                offsetof(struct mm_struct, context.id.counter));
index d42789499f17eb322a47da83046ca0119e643c75..af716b65110d901d1a1fea40da801240475c9ccf 100644 (file)
@@ -98,6 +98,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                MIDR_RANGE(MIDR_THUNDERX, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 1),
        },
+       {
+       /* Cavium ThunderX, T81 pass 1.0 */
+               .desc = "Cavium erratum 27456",
+               .capability = ARM64_WORKAROUND_CAVIUM_27456,
+               MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
+       },
 #endif
        {
        }
index 12e8d2bcb3f9da4e643649442685138dea7966d1..6c3b7345a6c428f5952909ac0833684ca0401f1a 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/errno.h>
 #include <asm/esr.h>
 #include <asm/irq.h>
+#include <asm/memory.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
 
        mov     x29, xzr                        // fp pointed to user-space
        .else
        add     x21, sp, #S_FRAME_SIZE
-       .endif
+       get_thread_info tsk
+       /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+       ldr     x20, [tsk, #TI_ADDR_LIMIT]
+       str     x20, [sp, #S_ORIG_ADDR_LIMIT]
+       mov     x20, #TASK_SIZE_64
+       str     x20, [tsk, #TI_ADDR_LIMIT]
+       ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
+       .endif /* \el == 0 */
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]
        .endm
 
        .macro  kernel_exit, el
+       .if     \el != 0
+       /* Restore the task's original addr_limit. */
+       ldr     x20, [sp, #S_ORIG_ADDR_LIMIT]
+       str     x20, [tsk, #TI_ADDR_LIMIT]
+
+       /* No need to restore UAO, it will be restored from SPSR_EL1 */
+       .endif
+
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
@@ -406,7 +422,6 @@ el1_irq:
        bl      trace_hardirqs_off
 #endif
 
-       get_thread_info tsk
        irq_handler
 
 #ifdef CONFIG_PREEMPT
index 013e2cbe792474eb3777c8f01ff65f2356527260..b1166d1e5955cd80096ea7412a2371008ec81b7c 100644 (file)
@@ -280,7 +280,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        }
 
        if (permission_fault(esr) && (addr < USER_DS)) {
-               if (get_fs() == KERNEL_DS)
+               /* regs->orig_addr_limit may be 0 if we entered from EL0 */
+               if (regs->orig_addr_limit == KERNEL_DS)
                        die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
 
                if (!search_exception_tables(regs->pc))
index d74fd8ce980aeb327b831fe8c7b4d862b6421524..3d5ce38a6f0b17fbaff74866a1d9b9bb5a8723b2 100644 (file)
@@ -41,21 +41,49 @@ static inline int __atomic_##op##_return(int i, atomic_t *v)                \
        return result;                                                  \
 }
 
+#define ATOMIC_FETCH_OP(op, asm_op, asm_con)                           \
+static inline int __atomic_fetch_##op(int i, atomic_t *v)              \
+{                                                                      \
+       int result, val;                                                \
+                                                                       \
+       asm volatile(                                                   \
+               "/* atomic_fetch_" #op " */\n"                          \
+               "1:     ssrf    5\n"                                    \
+               "       ld.w    %0, %3\n"                               \
+               "       mov     %1, %0\n"                               \
+               "       " #asm_op "     %1, %4\n"                       \
+               "       stcond  %2, %1\n"                               \
+               "       brne    1b"                                     \
+               : "=&r" (result), "=&r" (val), "=o" (v->counter)        \
+               : "m" (v->counter), #asm_con (i)                        \
+               : "cc");                                                \
+                                                                       \
+       return result;                                                  \
+}
+
 ATOMIC_OP_RETURN(sub, sub, rKs21)
 ATOMIC_OP_RETURN(add, add, r)
+ATOMIC_FETCH_OP (sub, sub, rKs21)
+ATOMIC_FETCH_OP (add, add, r)
 
-#define ATOMIC_OP(op, asm_op)                                          \
+#define ATOMIC_OPS(op, asm_op)                                         \
 ATOMIC_OP_RETURN(op, asm_op, r)                                                \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
        (void)__atomic_##op##_return(i, v);                             \
+}                                                                      \
+ATOMIC_FETCH_OP(op, asm_op, r)                                         \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                \
+{                                                                      \
+       return __atomic_fetch_##op(i, v);                               \
 }
 
-ATOMIC_OP(and, and)
-ATOMIC_OP(or, or)
-ATOMIC_OP(xor, eor)
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, eor)
 
-#undef ATOMIC_OP
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 
 /*
@@ -87,6 +115,14 @@ static inline int atomic_add_return(int i, atomic_t *v)
        return __atomic_add_return(i, v);
 }
 
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+       if (IS_21BIT_CONST(i))
+               return __atomic_fetch_sub(-i, v);
+
+       return __atomic_fetch_add(i, v);
+}
+
 /*
  * atomic_sub_return - subtract the atomic variable
  * @i: integer value to subtract
@@ -102,6 +138,14 @@ static inline int atomic_sub_return(int i, atomic_t *v)
        return __atomic_add_return(-i, v);
 }
 
+static inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+       if (IS_21BIT_CONST(i))
+               return __atomic_fetch_sub(i, v);
+
+       return __atomic_fetch_add(-i, v);
+}
+
 /*
  * __atomic_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
index 1c1c42330c99c9e432991429778625be6e5235df..63c7deceeeb6e8f69c5d77e1b518174401229767 100644 (file)
@@ -17,6 +17,7 @@
 
 asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
 asmlinkage int __raw_atomic_add_asm(volatile int *ptr, int value);
+asmlinkage int __raw_atomic_xadd_asm(volatile int *ptr, int value);
 
 asmlinkage int __raw_atomic_and_asm(volatile int *ptr, int value);
 asmlinkage int __raw_atomic_or_asm(volatile int *ptr, int value);
@@ -28,10 +29,17 @@ asmlinkage int __raw_atomic_test_asm(const volatile int *ptr, int value);
 #define atomic_add_return(i, v) __raw_atomic_add_asm(&(v)->counter, i)
 #define atomic_sub_return(i, v) __raw_atomic_add_asm(&(v)->counter, -(i))
 
+#define atomic_fetch_add(i, v) __raw_atomic_xadd_asm(&(v)->counter, i)
+#define atomic_fetch_sub(i, v) __raw_atomic_xadd_asm(&(v)->counter, -(i))
+
 #define atomic_or(i, v)  (void)__raw_atomic_or_asm(&(v)->counter, i)
 #define atomic_and(i, v) (void)__raw_atomic_and_asm(&(v)->counter, i)
 #define atomic_xor(i, v) (void)__raw_atomic_xor_asm(&(v)->counter, i)
 
+#define atomic_fetch_or(i, v)  __raw_atomic_or_asm(&(v)->counter, i)
+#define atomic_fetch_and(i, v) __raw_atomic_and_asm(&(v)->counter, i)
+#define atomic_fetch_xor(i, v) __raw_atomic_xor_asm(&(v)->counter, i)
+
 #endif
 
 #include <asm-generic/atomic.h>
index 490c7caa02d9337b7b91ae8db6934320e0e91715..c58f4a83ed6f343f6ef45962884352d6368f2dc8 100644 (file)
@@ -12,6 +12,8 @@
 #else
 
 #include <linux/atomic.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
 
 asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
 asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
@@ -48,8 +50,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-       while (arch_spin_is_locked(lock))
-               cpu_relax();
+       smp_cond_load_acquire(&lock->lock, !VAL);
 }
 
 static inline int arch_read_can_lock(arch_rwlock_t *rw)
index a401c27b69b4a3feb825059d299ce27be64ba191..68096e8f787f7c3c3644bdd702fbeea05cf9eab3 100644 (file)
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(insl_16);
 
 #ifdef CONFIG_SMP
 EXPORT_SYMBOL(__raw_atomic_add_asm);
+EXPORT_SYMBOL(__raw_atomic_xadd_asm);
 EXPORT_SYMBOL(__raw_atomic_and_asm);
 EXPORT_SYMBOL(__raw_atomic_or_asm);
 EXPORT_SYMBOL(__raw_atomic_xor_asm);
index 26fccb5568b97e4f4234c4e12a5f8cc8189bec56..1e2989c5d6b2bec2fb0ad4bdaacd5948b7a9b133 100644 (file)
@@ -605,6 +605,28 @@ ENTRY(___raw_atomic_add_asm)
        rts;
 ENDPROC(___raw_atomic_add_asm)
 
+/*
+ * r0 = ptr
+ * r1 = value
+ *
+ * ADD a signed value to a 32bit word and return the old value atomically.
+ * Clobbers: r3:0, p1:0
+ */
+ENTRY(___raw_atomic_xadd_asm)
+       p1 = r0;
+       r3 = r1;
+       [--sp] = rets;
+       call _get_core_lock;
+       r3 = [p1];
+       r2 = r3 + r2;
+       [p1] = r2;
+       r1 = p1;
+       call _put_core_lock;
+       r0 = r3;
+       rets = [sp++];
+       rts;
+ENDPROC(___raw_atomic_xadd_asm)
+
 /*
  * r0 = ptr
  * r1 = mask
@@ -618,10 +640,9 @@ ENTRY(___raw_atomic_and_asm)
        r3 = r1;
        [--sp] = rets;
        call _get_core_lock;
-       r2 = [p1];
-       r3 = r2 & r3;
-       [p1] = r3;
-       r3 = r2;
+       r3 = [p1];
+       r2 = r2 & r3;
+       [p1] = r2;
        r1 = p1;
        call _put_core_lock;
        r0 = r3;
@@ -642,10 +663,9 @@ ENTRY(___raw_atomic_or_asm)
        r3 = r1;
        [--sp] = rets;
        call _get_core_lock;
-       r2 = [p1];
-       r3 = r2 | r3;
-       [p1] = r3;
-       r3 = r2;
+       r3 = [p1];
+       r2 = r2 | r3;
+       [p1] = r2;
        r1 = p1;
        call _put_core_lock;
        r0 = r3;
@@ -666,10 +686,9 @@ ENTRY(___raw_atomic_xor_asm)
        r3 = r1;
        [--sp] = rets;
        call _get_core_lock;
-       r2 = [p1];
-       r3 = r2 ^ r3;
-       [p1] = r3;
-       r3 = r2;
+       r3 = [p1];
+       r2 = r2 ^ r3;
+       [p1] = r2;
        r1 = p1;
        call _put_core_lock;
        r0 = r3;
index 64f02d451aa8c915c81eebbd4c34f5808449a97b..1c2a5e264fc71cfd52f2acb0b24ddb1aff792be7 100644 (file)
@@ -60,16 +60,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
        return atomic_add_return(i, v) < 0;
 }
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-       atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-       atomic_sub_return(i, v);
-}
-
 static inline void atomic_inc(atomic_t *v)
 {
        atomic_inc_return(v);
@@ -136,16 +126,6 @@ static inline long long atomic64_add_negative(long long i, atomic64_t *v)
        return atomic64_add_return(i, v) < 0;
 }
 
-static inline void atomic64_add(long long i, atomic64_t *v)
-{
-       atomic64_add_return(i, v);
-}
-
-static inline void atomic64_sub(long long i, atomic64_t *v)
-{
-       atomic64_sub_return(i, v);
-}
-
 static inline void atomic64_inc(atomic64_t *v)
 {
        atomic64_inc_return(v);
@@ -182,11 +162,19 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
 #define ATOMIC_OP(op)                                                  \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       return __atomic32_fetch_##op(i, &v->counter);                   \
+}                                                                      \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
        (void)__atomic32_fetch_##op(i, &v->counter);                    \
 }                                                                      \
                                                                        \
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v)        \
+{                                                                      \
+       return __atomic64_fetch_##op(i, &v->counter);                   \
+}                                                                      \
 static inline void atomic64_##op(long long i, atomic64_t *v)           \
 {                                                                      \
        (void)__atomic64_fetch_##op(i, &v->counter);                    \
@@ -195,6 +183,8 @@ static inline void atomic64_##op(long long i, atomic64_t *v)                \
 ATOMIC_OP(or)
 ATOMIC_OP(and)
 ATOMIC_OP(xor)
+ATOMIC_OP(add)
+ATOMIC_OP(sub)
 
 #undef ATOMIC_OP
 
index 36e126d2f801adbedc2340acfedde9141156027e..d4912c88b829dbd8115ad25336101cd47d0f8567 100644 (file)
@@ -162,6 +162,8 @@ ATOMIC_EXPORT(__atomic64_fetch_##op);
 ATOMIC_FETCH_OP(or)
 ATOMIC_FETCH_OP(and)
 ATOMIC_FETCH_OP(xor)
+ATOMIC_FETCH_OP(add)
+ATOMIC_FETCH_OP(sub)
 
 ATOMIC_OP_RETURN(add)
 ATOMIC_OP_RETURN(sub)
index bce0d0d07e606e67dbce64bd4a6b133e9ed50905..614c6d76789a19d1743d2f3a0bfb263c230f9759 100644 (file)
@@ -12,7 +12,3 @@
  * the base baud is derived from the clock speed and so is variable
  */
 #define BASE_BAUD 0
-
-#define STD_COM_FLAGS          UPF_BOOT_AUTOCONF
-
-#define SERIAL_PORT_DFNS
index 4435a445ae7ec7f8d5fa88edc649d34f6dfdd257..349a47a918db7252229445838357990d7ce8c598 100644 (file)
@@ -28,6 +28,19 @@ static inline int atomic_##op##_return(int i, atomic_t *v)   \
        return ret;                                             \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op)                              \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                \
+{                                                              \
+       h8300flags flags;                                       \
+       int ret;                                                \
+                                                               \
+       flags = arch_local_irq_save();                          \
+       ret = v->counter;                                       \
+       v->counter c_op i;                                      \
+       arch_local_irq_restore(flags);                          \
+       return ret;                                             \
+}
+
 #define ATOMIC_OP(op, c_op)                                    \
 static inline void atomic_##op(int i, atomic_t *v)             \
 {                                                              \
@@ -41,17 +54,21 @@ static inline void atomic_##op(int i, atomic_t *v)          \
 ATOMIC_OP_RETURN(add, +=)
 ATOMIC_OP_RETURN(sub, -=)
 
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or,  |=)
-ATOMIC_OP(xor, ^=)
+#define ATOMIC_OPS(op, c_op)                                   \
+       ATOMIC_OP(op, c_op)                                     \
+       ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or,  |=)
+ATOMIC_OPS(xor, ^=)
+ATOMIC_OPS(add, +=)
+ATOMIC_OPS(sub, -=)
 
+#undef ATOMIC_OPS
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
-#define atomic_add(i, v)               (void)atomic_add_return(i, v)
 #define atomic_add_negative(a, v)      (atomic_add_return((a), (v)) < 0)
-
-#define atomic_sub(i, v)               (void)atomic_sub_return(i, v)
 #define atomic_sub_and_test(i, v)      (atomic_sub_return(i, v) == 0)
 
 #define atomic_inc_return(v)           atomic_add_return(1, v)
index 55696c4100d468208c757f30be47806c877dfab9..a62ba368b27d1ddf6f750efd1dcaa2b51419d1d8 100644 (file)
@@ -110,7 +110,7 @@ static inline void atomic_##op(int i, atomic_t *v)                  \
        );                                                              \
 }                                                                      \
 
-#define ATOMIC_OP_RETURN(op)                                                   \
+#define ATOMIC_OP_RETURN(op)                                           \
 static inline int atomic_##op##_return(int i, atomic_t *v)             \
 {                                                                      \
        int output;                                                     \
@@ -127,16 +127,37 @@ static inline int atomic_##op##_return(int i, atomic_t *v)                \
        return output;                                                  \
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                            \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       int output, val;                                                \
+                                                                       \
+       __asm__ __volatile__ (                                          \
+               "1:     %0 = memw_locked(%2);\n"                        \
+               "       %1 = "#op "(%0,%3);\n"                          \
+               "       memw_locked(%2,P3)=%1;\n"                       \
+               "       if !P3 jump 1b;\n"                              \
+               : "=&r" (output), "=&r" (val)                           \
+               : "r" (&v->counter), "r" (i)                            \
+               : "memory", "p3"                                        \
+       );                                                              \
+       return output;                                                  \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index 12ca4ebc033855c93879c81bafc65af93b9bde97..a1c55788c5d6591eac4786b2dc0fcdd3164eb65c 100644 (file)
@@ -23,6 +23,8 @@
 #define _ASM_SPINLOCK_H
 
 #include <asm/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * This file is pulled in for SMP builds.
@@ -176,8 +178,12 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
  * SMP spinlocks are intended to allow only a single CPU at the lock
  */
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(lock) \
-       do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->lock, !VAL);
+}
+
 #define arch_spin_is_locked(x) ((x)->lock != 0)
 
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
index 8dfb5f6f6c352c45126656216edb228e15026be9..f565ad3761427afb14625ee612bdb6067c8d28c2 100644 (file)
@@ -42,8 +42,27 @@ ia64_atomic_##op (int i, atomic_t *v)                                        \
        return new;                                                     \
 }
 
-ATOMIC_OP(add, +)
-ATOMIC_OP(sub, -)
+#define ATOMIC_FETCH_OP(op, c_op)                                      \
+static __inline__ int                                                  \
+ia64_atomic_fetch_##op (int i, atomic_t *v)                            \
+{                                                                      \
+       __s32 old, new;                                                 \
+       CMPXCHG_BUGCHECK_DECL                                           \
+                                                                       \
+       do {                                                            \
+               CMPXCHG_BUGCHECK(v);                                    \
+               old = atomic_read(v);                                   \
+               new = old c_op i;                                       \
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
+       return old;                                                     \
+}
+
+#define ATOMIC_OPS(op, c_op)                                           \
+       ATOMIC_OP(op, c_op)                                             \
+       ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(add, +)
+ATOMIC_OPS(sub, -)
 
 #define atomic_add_return(i,v)                                         \
 ({                                                                     \
@@ -69,14 +88,44 @@ ATOMIC_OP(sub, -)
                : ia64_atomic_sub(__ia64_asr_i, v);                     \
 })
 
-ATOMIC_OP(and, &)
-ATOMIC_OP(or, |)
-ATOMIC_OP(xor, ^)
+#define atomic_fetch_add(i,v)                                          \
+({                                                                     \
+       int __ia64_aar_i = (i);                                         \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
+            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
+            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
+            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+               ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)       \
+               : ia64_atomic_fetch_add(__ia64_aar_i, v);               \
+})
+
+#define atomic_fetch_sub(i,v)                                          \
+({                                                                     \
+       int __ia64_asr_i = (i);                                         \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
+            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
+            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
+            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+               ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)      \
+               : ia64_atomic_fetch_sub(__ia64_asr_i, v);               \
+})
+
+ATOMIC_FETCH_OP(and, &)
+ATOMIC_FETCH_OP(or, |)
+ATOMIC_FETCH_OP(xor, ^)
+
+#define atomic_and(i,v)        (void)ia64_atomic_fetch_and(i,v)
+#define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
+#define atomic_xor(i,v)        (void)ia64_atomic_fetch_xor(i,v)
 
-#define atomic_and(i,v)        (void)ia64_atomic_and(i,v)
-#define atomic_or(i,v) (void)ia64_atomic_or(i,v)
-#define atomic_xor(i,v)        (void)ia64_atomic_xor(i,v)
+#define atomic_fetch_and(i,v)  ia64_atomic_fetch_and(i,v)
+#define atomic_fetch_or(i,v)   ia64_atomic_fetch_or(i,v)
+#define atomic_fetch_xor(i,v)  ia64_atomic_fetch_xor(i,v)
 
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP
 
 #define ATOMIC64_OP(op, c_op)                                          \
@@ -94,8 +143,27 @@ ia64_atomic64_##op (__s64 i, atomic64_t *v)                         \
        return new;                                                     \
 }
 
-ATOMIC64_OP(add, +)
-ATOMIC64_OP(sub, -)
+#define ATOMIC64_FETCH_OP(op, c_op)                                    \
+static __inline__ long                                                 \
+ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v)                      \
+{                                                                      \
+       __s64 old, new;                                                 \
+       CMPXCHG_BUGCHECK_DECL                                           \
+                                                                       \
+       do {                                                            \
+               CMPXCHG_BUGCHECK(v);                                    \
+               old = atomic64_read(v);                                 \
+               new = old c_op i;                                       \
+       } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
+       return old;                                                     \
+}
+
+#define ATOMIC64_OPS(op, c_op)                                         \
+       ATOMIC64_OP(op, c_op)                                           \
+       ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(add, +)
+ATOMIC64_OPS(sub, -)
 
 #define atomic64_add_return(i,v)                                       \
 ({                                                                     \
@@ -121,14 +189,44 @@ ATOMIC64_OP(sub, -)
                : ia64_atomic64_sub(__ia64_asr_i, v);                   \
 })
 
-ATOMIC64_OP(and, &)
-ATOMIC64_OP(or, |)
-ATOMIC64_OP(xor, ^)
+#define atomic64_fetch_add(i,v)                                                \
+({                                                                     \
+       long __ia64_aar_i = (i);                                        \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)           \
+            || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)           \
+            || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)           \
+            || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))         \
+               ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)       \
+               : ia64_atomic64_fetch_add(__ia64_aar_i, v);             \
+})
+
+#define atomic64_fetch_sub(i,v)                                                \
+({                                                                     \
+       long __ia64_asr_i = (i);                                        \
+       (__builtin_constant_p(i)                                        \
+        && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)          \
+            || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)          \
+            || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)          \
+            || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))        \
+               ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)      \
+               : ia64_atomic64_fetch_sub(__ia64_asr_i, v);             \
+})
+
+ATOMIC64_FETCH_OP(and, &)
+ATOMIC64_FETCH_OP(or, |)
+ATOMIC64_FETCH_OP(xor, ^)
+
+#define atomic64_and(i,v)      (void)ia64_atomic64_fetch_and(i,v)
+#define atomic64_or(i,v)       (void)ia64_atomic64_fetch_or(i,v)
+#define atomic64_xor(i,v)      (void)ia64_atomic64_fetch_xor(i,v)
 
-#define atomic64_and(i,v)      (void)ia64_atomic64_and(i,v)
-#define atomic64_or(i,v)       (void)ia64_atomic64_or(i,v)
-#define atomic64_xor(i,v)      (void)ia64_atomic64_xor(i,v)
+#define atomic64_fetch_and(i,v)        ia64_atomic64_fetch_and(i,v)
+#define atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
+#define atomic64_fetch_xor(i,v)        ia64_atomic64_fetch_xor(i,v)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
index f41e66d65e31c4b6c1eb15e9bdb0ee71d2018378..28cb819e0ff93adadf6ffbb1974cdcd1a5a9d8fc 100644 (file)
@@ -82,7 +82,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-       if (cmpxchg_acq(count, 1, 0) == 1)
+       if (atomic_read(count) == 1 && cmpxchg_acq(count, 1, 0) == 1)
                return 1;
        return 0;
 }
index 8b23e070b8440e0ce0989ec07bb416c55ad86e35..8fa98dd303b4b4733f862cce39cca2b82235f1b7 100644 (file)
@@ -40,7 +40,7 @@
 static inline void
 __down_read (struct rw_semaphore *sem)
 {
-       long result = ia64_fetchadd8_acq((unsigned long *)&sem->count, 1);
+       long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
 
        if (result < 0)
                rwsem_down_read_failed(sem);
@@ -55,9 +55,9 @@ ___down_write (struct rw_semaphore *sem)
        long old, new;
 
        do {
-               old = sem->count;
+               old = atomic_long_read(&sem->count);
                new = old + RWSEM_ACTIVE_WRITE_BIAS;
-       } while (cmpxchg_acq(&sem->count, old, new) != old);
+       } while (atomic_long_cmpxchg_acquire(&sem->count, old, new) != old);
 
        return old;
 }
@@ -85,7 +85,7 @@ __down_write_killable (struct rw_semaphore *sem)
 static inline void
 __up_read (struct rw_semaphore *sem)
 {
-       long result = ia64_fetchadd8_rel((unsigned long *)&sem->count, -1);
+       long result = ia64_fetchadd8_rel((unsigned long *)&sem->count.counter, -1);
 
        if (result < 0 && (--result & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
@@ -100,9 +100,9 @@ __up_write (struct rw_semaphore *sem)
        long old, new;
 
        do {
-               old = sem->count;
+               old = atomic_long_read(&sem->count);
                new = old - RWSEM_ACTIVE_WRITE_BIAS;
-       } while (cmpxchg_rel(&sem->count, old, new) != old);
+       } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
 
        if (new < 0 && (new & RWSEM_ACTIVE_MASK) == 0)
                rwsem_wake(sem);
@@ -115,8 +115,8 @@ static inline int
 __down_read_trylock (struct rw_semaphore *sem)
 {
        long tmp;
-       while ((tmp = sem->count) >= 0) {
-               if (tmp == cmpxchg_acq(&sem->count, tmp, tmp+1)) {
+       while ((tmp = atomic_long_read(&sem->count)) >= 0) {
+               if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp, tmp+1)) {
                        return 1;
                }
        }
@@ -129,8 +129,8 @@ __down_read_trylock (struct rw_semaphore *sem)
 static inline int
 __down_write_trylock (struct rw_semaphore *sem)
 {
-       long tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
-                             RWSEM_ACTIVE_WRITE_BIAS);
+       long tmp = atomic_long_cmpxchg_acquire(&sem->count,
+                       RWSEM_UNLOCKED_VALUE, RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
 }
 
@@ -143,19 +143,12 @@ __downgrade_write (struct rw_semaphore *sem)
        long old, new;
 
        do {
-               old = sem->count;
+               old = atomic_long_read(&sem->count);
                new = old - RWSEM_WAITING_BIAS;
-       } while (cmpxchg_rel(&sem->count, old, new) != old);
+       } while (atomic_long_cmpxchg_release(&sem->count, old, new) != old);
 
        if (old < 0)
                rwsem_downgrade_wake(sem);
 }
 
-/*
- * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
- * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
- */
-#define rwsem_atomic_add(delta, sem)   atomic64_add(delta, (atomic64_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem)        atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
-
 #endif /* _ASM_IA64_RWSEM_H */
index 45698cd15b7b85a19a59e26ea8c91f9255f6454e..ca9e76149a4aa16974047820829e6c017afe77dd 100644 (file)
@@ -15,6 +15,8 @@
 
 #include <linux/atomic.h>
 #include <asm/intrinsics.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 #define arch_spin_lock_init(x)                 ((x)->lock = 0)
 
@@ -86,6 +88,8 @@ static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
                        return;
                cpu_relax();
        }
+
+       smp_acquire__after_ctrl_dep();
 }
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
index 01d877c6868f05e5a8fb72639a980cd4b7ef66dc..cf3023dced49d7fbe55ab50a8f357b95e5f5194a 100644 (file)
@@ -8,12 +8,13 @@
 
 #include <asm/processor.h>
 
-static void putc(char c);
+static void m32r_putc(char c);
 
 static int puts(const char *s)
 {
        char c;
-       while ((c = *s++)) putc(c);
+       while ((c = *s++))
+               m32r_putc(c);
        return 0;
 }
 
@@ -41,7 +42,7 @@ static int puts(const char *s)
 #define BOOT_SIO0TXB   PLD_ESIO0TXB
 #endif
 
-static void putc(char c)
+static void m32r_putc(char c)
 {
        while ((*BOOT_SIO0STS & 0x3) != 0x3)
                cpu_relax();
@@ -61,7 +62,7 @@ static void putc(char c)
 #define SIO0TXB        (volatile unsigned short *)(0x00efd000 + 30)
 #endif
 
-static void putc(char c)
+static void m32r_putc(char c)
 {
        while ((*SIO0STS & 0x1) == 0)
                cpu_relax();
index ea35160d632bfc073e15c912262d2190d05371b7..640cc1c7099f293700fe8d2ecbb4535da547632b 100644 (file)
@@ -89,16 +89,44 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v)              \
        return result;                                                  \
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                            \
+static __inline__ int atomic_fetch_##op(int i, atomic_t *v)            \
+{                                                                      \
+       unsigned long flags;                                            \
+       int result, val;                                                \
+                                                                       \
+       local_irq_save(flags);                                          \
+       __asm__ __volatile__ (                                          \
+               "# atomic_fetch_" #op "         \n\t"                   \
+               DCACHE_CLEAR("%0", "r4", "%2")                          \
+               M32R_LOCK" %1, @%2;             \n\t"                   \
+               "mv %0, %1                      \n\t"                   \
+               #op " %1, %3;                   \n\t"                   \
+               M32R_UNLOCK" %1, @%2;           \n\t"                   \
+               : "=&r" (result), "=&r" (val)                           \
+               : "r" (&v->counter), "r" (i)                            \
+               : "memory"                                              \
+               __ATOMIC_CLOBBER                                        \
+       );                                                              \
+       local_irq_restore(flags);                                       \
+                                                                       \
+       return result;                                                  \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index fa13694eaae35f29233e3a477c5d6e2aebc95f5e..323c7fc953cdefa3e0b89efaafbe8aace9ed41c0 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/atomic.h>
 #include <asm/dcache_clear.h>
 #include <asm/page.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
 
 #define arch_spin_is_locked(x)         (*(volatile int *)(&(x)->slock) <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-               do { cpu_relax(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->slock, VAL > 0);
+}
 
 /**
  * arch_spin_trylock - Try spin lock and return a result
index fa31be297b85acf1ba31ca4059f2f48825c91d87..73d92ea0ce65b5caca8e79f208995c034872b004 100644 (file)
@@ -288,7 +288,7 @@ _clear_bss:
 #endif
 
        /*
-        *      Assember start up done, start code proper.
+        *      Assembler start up done, start code proper.
         */
        jsr     start_kernel                    /* start Linux kernel */
 
index c525e4c08f8477f45bb0e978b0c436af07471cbc..9abb1a441da082e2fcf8724a0171adec50e4e7b1 100644 (file)
@@ -111,7 +111,7 @@ void __init config_BSP(char *commandp, int size)
 /***************************************************************************/
 
 /*
- * Some 5272 based boards have the FEC ethernet diectly connected to
+ * Some 5272 based boards have the FEC ethernet directly connected to
  * an ethernet switch. In this case we need to use the fixed phy type,
  * and we need to declare it early in boot.
  */
index 821de928dc3f9d14adbb16c1ad70accc9e57f477..6a640be485684f00497a63ee9630bd77df9dbe4a 100644 (file)
@@ -42,7 +42,7 @@ static unsigned long iospace;
 
 /*
  * We need to be carefull probing on bus 0 (directly connected to host
- * bridge). We should only acccess the well defined possible devices in
+ * bridge). We should only access the well defined possible devices in
  * use, ignore aliases and the like.
  */
 static unsigned char mcf_host_slot2sid[32] = {
index 3ee6976f60885e5b2e0e4e2a80df6d824de14694..8f5b6f7dd1366024b973a6eb7846bdd0dd47bd02 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -359,6 +360,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -553,7 +555,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index e96787ffcbced33f9893d2760259177086b13c1f..31bded9c83d45f3a4b6861e2a5a11379324b4209 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -341,6 +342,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -512,7 +514,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 083fe6beac149da81250ca4fccd50798f9e4ef0d..0d7739e04ae2f13d63719db1a45a3a712b668e36 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -350,6 +351,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -533,7 +535,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 475130c06dcba04b646ec2e7c199aee7045dea7d..2cbb5c465fec03487046c08b8aef427a6cc81518 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -340,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 4339658c200f3eb8af4bc2645836e109466055cb..96102a42c156ee2d94fa5e069ba8e8c7f879387d 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -341,6 +342,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -514,7 +516,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 831cc8c3a2e259f67d318257e72bcab84e36b8c1..97d88f7dc5a7bd60db5f05db2483bec15de4723b 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -357,6 +358,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -536,7 +538,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 6377afeb522bbb9a235d1bd57f36cc14ca36ac5b..be25ef208f0f6237f055dea3b6cef375e59ea0a9 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -390,6 +391,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -616,7 +618,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 4304b3d56262bc677383ba4389b65d5e9a7625cd..a008344360c9161183afc063795d16bef1a945ff 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -339,6 +340,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 074bda4094ffd5b0913d4411371e2597774faab2..6735a25f36d446f389a0620faede9d3f37c42c8e 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -340,6 +341,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -504,7 +506,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 07b9fa8d7f2ea71dd09a26c8d9abe4a519baa07c..780c6e9f6cf9113df4c75ab3a2a8bd3220f741a4 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -346,6 +347,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -527,7 +529,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 36e6fae02d458e2dcb1a96c0caee68a5301f0341..44693cf361e5df9c2600f26d36e033e00e7bf2b2 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -337,6 +338,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -506,7 +508,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 903acf929511e5066403bfdd1c4d78bbc4084c35..ef0071d6115834657ee43e674dbb1b0fd8422482 100644 (file)
@@ -9,6 +9,7 @@ CONFIG_LOG_BUF_SHIFT=16
 # CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_USERFAULTFD=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
@@ -337,6 +338,7 @@ CONFIG_MACVTAP=m
 CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_GENEVE=m
+CONFIG_GTP=m
 CONFIG_MACSEC=m
 CONFIG_NETCONSOLE=m
 CONFIG_NETCONSOLE_DYNAMIC=y
@@ -506,7 +508,9 @@ CONFIG_TEST_STRING_HELPERS=m
 CONFIG_TEST_KSTRTOX=m
 CONFIG_TEST_PRINTF=m
 CONFIG_TEST_BITMAP=m
+CONFIG_TEST_UUID=m
 CONFIG_TEST_RHASHTABLE=m
+CONFIG_TEST_HASH=m
 CONFIG_TEST_LKM=m
 CONFIG_TEST_USER_COPY=m
 CONFIG_TEST_BPF=m
index 78cb60f5bb4dda18b28276c4b8b79c787e948590..9bbffebe3eb504833ed0937670bd4168751d61a4 100644 (file)
@@ -10191,7 +10191,7 @@ xdnrm_con:
 xdnrm_sd:
        mov.l           %a1,-(%sp)
        tst.b           LOCAL_EX(%a0)           # is denorm pos or neg?
-       smi.b           %d1                     # set d0 accodingly
+       smi.b           %d1                     # set d0 accordingly
        bsr.l           unf_sub
        mov.l           (%sp)+,%a1
 xdnrm_exit:
@@ -10990,7 +10990,7 @@ src_qnan_m:
 # routines where an instruction is selected by an index into
 # a large jump table corresponding to a given instruction which
 # has been decoded. Flow continues here where we now decode
-# further accoding to the source operand type.
+# further according to the source operand type.
 #
 
        global          fsinh
@@ -23196,14 +23196,14 @@ m_sign:
 #
 #  1. Branch on the sign of the adjusted exponent.
 #  2p.(positive exp)
-#   2. Check M16 and the digits in lwords 2 and 3 in decending order.
+#   2. Check M16 and the digits in lwords 2 and 3 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Subtract the count from the exp.
 #   5. Check if the exp has crossed zero in #3 above; make the exp abs
 #         and set SE.
 #      6. Multiply the mantissa by 10**count.
 #  2n.(negative exp)
-#   2. Check the digits in lwords 3 and 2 in decending order.
+#   2. Check the digits in lwords 3 and 2 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Add the count to the exp.
 #   5. Check if the exp has crossed zero in #3 above; clear SE.
index 4aedef973cf6fbd02cc67ea65bcbcede0da21c68..3535e6c87eec611bbe03c965c0fc5aeeaeff5343 100644 (file)
@@ -13156,14 +13156,14 @@ m_sign:
 #
 #  1. Branch on the sign of the adjusted exponent.
 #  2p.(positive exp)
-#   2. Check M16 and the digits in lwords 2 and 3 in decending order.
+#   2. Check M16 and the digits in lwords 2 and 3 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Subtract the count from the exp.
 #   5. Check if the exp has crossed zero in #3 above; make the exp abs
 #         and set SE.
 #      6. Multiply the mantissa by 10**count.
 #  2n.(negative exp)
-#   2. Check the digits in lwords 3 and 2 in decending order.
+#   2. Check the digits in lwords 3 and 2 in descending order.
 #   3. Add one for each zero encountered until a non-zero digit.
 #   4. Add the count to the exp.
 #   5. Check if the exp has crossed zero in #3 above; clear SE.
index 4858178260f90435810c8201e220d0bee16e446e..cf4c3a7b1a45d2bfac701d4e6a8a44e2f7f260d4 100644 (file)
@@ -53,6 +53,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v)           \
        return t;                                                       \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       int t, tmp;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+                       "1:     movel %2,%1\n"                          \
+                       "       " #asm_op "l %3,%1\n"                   \
+                       "       casl %2,%1,%0\n"                        \
+                       "       jne 1b"                                 \
+                       : "+m" (*v), "=&d" (t), "=&d" (tmp)             \
+                       : "g" (i), "2" (atomic_read(v)));               \
+       return tmp;                                                     \
+}
+
 #else
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
@@ -68,20 +83,41 @@ static inline int atomic_##op##_return(int i, atomic_t * v)         \
        return t;                                                       \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
+static inline int atomic_fetch_##op(int i, atomic_t * v)               \
+{                                                                      \
+       unsigned long flags;                                            \
+       int t;                                                          \
+                                                                       \
+       local_irq_save(flags);                                          \
+       t = v->counter;                                                 \
+       v->counter c_op i;                                              \
+       local_irq_restore(flags);                                       \
+                                                                       \
+       return t;                                                       \
+}
+
 #endif /* CONFIG_RMW_INSNS */
 
 #define ATOMIC_OPS(op, c_op, asm_op)                                   \
        ATOMIC_OP(op, c_op, asm_op)                                     \
-       ATOMIC_OP_RETURN(op, c_op, asm_op)
+       ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
+       ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, add)
 ATOMIC_OPS(sub, -=, sub)
 
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, eor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)                                   \
+       ATOMIC_OP(op, c_op, asm_op)                                     \
+       ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, eor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index 429fe26e320c9813afdf88011195552498a90032..208b4daa14b334f4ce4365e8eb88ca91ab30b818 100644 (file)
@@ -18,7 +18,7 @@
  * AUG/22/2000 : added support for 32-bit Dual-Address-Mode (K) 2000
  *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  *
- * AUG/25/2000 : addad support for 8, 16 and 32-bit Single-Address-Mode (K)2000
+ * AUG/25/2000 : added support for 8, 16 and 32-bit Single-Address-Mode (K)2000
  *               Oliver Kamphenkel (O.Kamphenkel@tu-bs.de)
  *
  * APR/18/2002 : added proper support for MCF5272 DMA controller.
index f186459072e9b0042fbf413d9224367350adcfa6..699f20c8a0fee4fbe6e093ccf0cee8cf042f3cfb 100644 (file)
 /*
  *     I2C module.
  */
-#define MCFI2C_BASE0           (MCF_MBAR + 0x280)      /* Base addreess I2C0 */
+#define MCFI2C_BASE0           (MCF_MBAR + 0x280)      /* Base address I2C0 */
 #define MCFI2C_SIZE0           0x20                    /* Register set size */
 
-#define MCFI2C_BASE1           (MCF_MBAR2 + 0x440)     /* Base addreess I2C1 */
+#define MCFI2C_BASE1           (MCF_MBAR2 + 0x440)     /* Base address I2C1 */
 #define MCFI2C_SIZE1           0x20                    /* Register set size */
 
 /*
index 26cc3d5a63f82f2de7021a8a3da28e554f1c815e..8824236e303fe815fe99a24008ba4a7b125fb1a5 100644 (file)
@@ -38,7 +38,7 @@
 /*
  *     MMU Operation register.
  */
-#define        MMUOR_UAA       0x00000001              /* Update allocatiom address */
+#define        MMUOR_UAA       0x00000001              /* Update allocation address */
 #define        MMUOR_ACC       0x00000002              /* TLB access */
 #define        MMUOR_RD        0x00000004              /* TLB access read */
 #define        MMUOR_WR        0x00000000              /* TLB access write */
index fc5b36278d040b0620778cbf1f67499f8db5eb6f..c48d21b68f0485fbce8dc5a85bf553b51ac05642 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Q40 master Chip Control
- * RTC stuff merged for compactnes..
+ * RTC stuff merged for compactness.
 */
 
 #ifndef _Q40_MASTER_H
index 4d2adfb32a2ab4f61951357114a8d053bf74de82..7990b6f50105b19b48417843343778b00c7dc03e 100644 (file)
@@ -60,7 +60,7 @@
  *
  * The host talks to the IOPs using a rather simple message-passing scheme via
  * a shared memory area in the IOP RAM. Each IOP has seven "channels"; each
- * channel is conneced to a specific software driver on the IOP. For example
+ * channel is connected to a specific software driver on the IOP. For example
  * on the SCC IOP there is one channel for each serial port. Each channel has
  * an incoming and and outgoing message queue with a depth of one.
  *
index 759679d9ab96610bec00ec8935063e25f2cfae4d..6d1e760e2a0e21825a4ff54ee83ec7ccca0fa5a3 100644 (file)
@@ -130,7 +130,7 @@ do_fscc=0
        bfextu  %d2{#13,#3},%d0
 .endm
 
-| decode the 8bit diplacement from the brief extension word
+| decode the 8bit displacement from the brief extension word
 .macro fp_decode_disp8
        move.b  %d2,%d0
        ext.w   %d0
index 88fa25fae8bd9ca3065558df052891c1d68d45cd..def2c642f05308b84815fe5bc61b748d882e3d86 100644 (file)
@@ -69,16 +69,44 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        return result;                                                  \
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                            \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       int result, temp;                                               \
+                                                                       \
+       smp_mb();                                                       \
+                                                                       \
+       asm volatile (                                                  \
+               "1:     LNKGETD %1, [%2]\n"                             \
+               "       " #op " %0, %1, %3\n"                           \
+               "       LNKSETD [%2], %0\n"                             \
+               "       DEFR    %0, TXSTAT\n"                           \
+               "       ANDT    %0, %0, #HI(0x3f000000)\n"              \
+               "       CMPT    %0, #HI(0x02000000)\n"                  \
+               "       BNZ 1b\n"                                       \
+               : "=&d" (temp), "=&d" (result)                          \
+               : "da" (&v->counter), "bd" (i)                          \
+               : "cc");                                                \
+                                                                       \
+       smp_mb();                                                       \
+                                                                       \
+       return result;                                                  \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index 0295d9b8d5bf2730dc9fe3fed7ab523444b2354e..6c1380a8a0d4a437dfb2a4146deada934bcdb354 100644 (file)
@@ -64,15 +64,40 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        return result;                                                  \
 }
 
-#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+#define ATOMIC_FETCH_OP(op, c_op)                                      \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       unsigned long result;                                           \
+       unsigned long flags;                                            \
+                                                                       \
+       __global_lock1(flags);                                          \
+       result = v->counter;                                            \
+       fence();                                                        \
+       v->counter c_op i;                                              \
+       __global_unlock1(flags);                                        \
+                                                                       \
+       return result;                                                  \
+}
+
+#define ATOMIC_OPS(op, c_op)                                           \
+       ATOMIC_OP(op, c_op)                                             \
+       ATOMIC_OP_RETURN(op, c_op)                                      \
+       ATOMIC_FETCH_OP(op, c_op)
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op)                                           \
+       ATOMIC_OP(op, c_op)                                             \
+       ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index 86a7cf3d1386ca6000e111780a87662056b2a427..c0c7a22be1aeff1769308825d9d739837c8baaaa 100644 (file)
@@ -1,14 +1,24 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
 #ifdef CONFIG_METAG_ATOMICITY_LOCK1
 #include <asm/spinlock_lock1.h>
 #else
 #include <asm/spinlock_lnkget.h>
 #endif
 
-#define arch_spin_unlock_wait(lock) \
-       do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+/*
+ * both lock1 and lnkget are test-and-set spinlocks with 0 unlocked and 1
+ * locked.
+ */
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
index 67e2ef48d2d0156fc9a4ab3c62e46867a43a04a5..5bbf38b916ef36839396c01cfd5bc105245ae934 100644 (file)
@@ -170,7 +170,7 @@ static struct irqaction timer_irqaction = {
        .dev_id = &clockevent_xilinx_timer,
 };
 
-static __init void xilinx_clockevent_init(void)
+static __init int xilinx_clockevent_init(void)
 {
        clockevent_xilinx_timer.mult =
                div_sc(timer_clock_freq, NSEC_PER_SEC,
@@ -181,6 +181,8 @@ static __init void xilinx_clockevent_init(void)
                clockevent_delta2ns(1, &clockevent_xilinx_timer);
        clockevent_xilinx_timer.cpumask = cpumask_of(0);
        clockevents_register_device(&clockevent_xilinx_timer);
+
+       return 0;
 }
 
 static u64 xilinx_clock_read(void)
@@ -229,8 +231,14 @@ static struct clocksource clocksource_microblaze = {
 
 static int __init xilinx_clocksource_init(void)
 {
-       if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq))
-               panic("failed to register clocksource");
+       int ret;
+
+       ret = clocksource_register_hz(&clocksource_microblaze,
+                                     timer_clock_freq);
+       if (ret) {
+               pr_err("failed to register clocksource");
+               return ret;
+       }
 
        /* stop timer1 */
        write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT,
@@ -239,16 +247,16 @@ static int __init xilinx_clocksource_init(void)
        write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1);
 
        /* register timecounter - for ftrace support */
-       init_xilinx_timecounter();
-       return 0;
+       return init_xilinx_timecounter();
 }
 
-static void __init xilinx_timer_init(struct device_node *timer)
+static int __init xilinx_timer_init(struct device_node *timer)
 {
        struct clk *clk;
        static int initialized;
        u32 irq;
        u32 timer_num = 1;
+       int ret;
 
        if (initialized)
                return;
@@ -258,7 +266,7 @@ static void __init xilinx_timer_init(struct device_node *timer)
        timer_baseaddr = of_iomap(timer, 0);
        if (!timer_baseaddr) {
                pr_err("ERROR: invalid timer base address\n");
-               BUG();
+               return -ENXIO;
        }
 
        write_fn = timer_write32;
@@ -271,11 +279,15 @@ static void __init xilinx_timer_init(struct device_node *timer)
        }
 
        irq = irq_of_parse_and_map(timer, 0);
+       if (irq <= 0) {
+               pr_err("Failed to parse and map irq");
+               return -EINVAL;
+       }
 
        of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
        if (timer_num) {
-               pr_emerg("Please enable two timers in HW\n");
-               BUG();
+               pr_err("Please enable two timers in HW\n");
+               return -EINVAL;
        }
 
        pr_info("%s: irq=%d\n", timer->full_name, irq);
@@ -297,14 +309,27 @@ static void __init xilinx_timer_init(struct device_node *timer)
 
        freq_div_hz = timer_clock_freq / HZ;
 
-       setup_irq(irq, &timer_irqaction);
+       ret = setup_irq(irq, &timer_irqaction);
+       if (ret) {
+               pr_err("Failed to setup IRQ");
+               return ret;
+       }
+
 #ifdef CONFIG_HEART_BEAT
        microblaze_setup_heartbeat();
 #endif
-       xilinx_clocksource_init();
-       xilinx_clockevent_init();
+
+       ret = xilinx_clocksource_init();
+       if (ret)
+               return ret;
+
+       ret = xilinx_clockevent_init();
+       if (ret)
+               return ret;
 
        sched_clock_register(xilinx_clock_read, 32, timer_clock_freq);
+
+       return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
index 835b402e4574fa1a27da780da9ae398bbf3bb276..0ab176bdb8e8107e3a8b700215ce9a96471b3ee9 100644 (file)
@@ -66,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v)                             \
                        "       " #asm_op " %0, %2                      \n"   \
                        "       sc      %0, %1                          \n"   \
                        "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)  \
                        : "Ir" (i));                                          \
                } while (unlikely(!temp));                                    \
        } else {                                                              \
@@ -79,12 +79,10 @@ static __inline__ void atomic_##op(int i, atomic_t * v)                           \
 }
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)                                   \
-static __inline__ int atomic_##op##_return(int i, atomic_t * v)                      \
+static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)              \
 {                                                                            \
        int result;                                                           \
                                                                              \
-       smp_mb__before_llsc();                                                \
-                                                                             \
        if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
                int temp;                                                     \
                                                                              \
@@ -125,23 +123,84 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v)                 \
                raw_local_irq_restore(flags);                                 \
        }                                                                     \
                                                                              \
-       smp_llsc_mb();                                                        \
+       return result;                                                        \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op)                                    \
+static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)       \
+{                                                                            \
+       int result;                                                           \
+                                                                             \
+       if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
+               int temp;                                                     \
+                                                                             \
+               __asm__ __volatile__(                                         \
+               "       .set    arch=r4000                              \n"   \
+               "1:     ll      %1, %2          # atomic_fetch_" #op "  \n"   \
+               "       " #asm_op " %0, %1, %3                          \n"   \
+               "       sc      %0, %2                                  \n"   \
+               "       beqzl   %0, 1b                                  \n"   \
+               "       move    %0, %1                                  \n"   \
+               "       .set    mips0                                   \n"   \
+               : "=&r" (result), "=&r" (temp),                               \
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
+               : "Ir" (i));                                                  \
+       } else if (kernel_uses_llsc) {                                        \
+               int temp;                                                     \
+                                                                             \
+               do {                                                          \
+                       __asm__ __volatile__(                                 \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
+                       "       ll      %1, %2  # atomic_fetch_" #op "  \n"   \
+                       "       " #asm_op " %0, %1, %3                  \n"   \
+                       "       sc      %0, %2                          \n"   \
+                       "       .set    mips0                           \n"   \
+                       : "=&r" (result), "=&r" (temp),                       \
+                         "+" GCC_OFF_SMALL_ASM() (v->counter)                \
+                       : "Ir" (i));                                          \
+               } while (unlikely(!result));                                  \
+                                                                             \
+               result = temp;                                                \
+       } else {                                                              \
+               unsigned long flags;                                          \
+                                                                             \
+               raw_local_irq_save(flags);                                    \
+               result = v->counter;                                          \
+               v->counter c_op i;                                            \
+               raw_local_irq_restore(flags);                                 \
+       }                                                                     \
                                                                              \
        return result;                                                        \
 }
 
 #define ATOMIC_OPS(op, c_op, asm_op)                                         \
        ATOMIC_OP(op, c_op, asm_op)                                           \
-       ATOMIC_OP_RETURN(op, c_op, asm_op)
+       ATOMIC_OP_RETURN(op, c_op, asm_op)                                    \
+       ATOMIC_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC_OPS(add, +=, addu)
 ATOMIC_OPS(sub, -=, subu)
 
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#define atomic_add_return_relaxed      atomic_add_return_relaxed
+#define atomic_sub_return_relaxed      atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed       atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed       atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op)                                         \
+       ATOMIC_OP(op, c_op, asm_op)                                           \
+       ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
+
+#define atomic_fetch_and_relaxed       atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed                atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed       atomic_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
@@ -362,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)                    \
 }
 
 #define ATOMIC64_OP_RETURN(op, c_op, asm_op)                                 \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)        \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
 {                                                                            \
        long result;                                                          \
                                                                              \
-       smp_mb__before_llsc();                                                \
-                                                                             \
        if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
                long temp;                                                    \
                                                                              \
@@ -409,22 +466,85 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)           \
                raw_local_irq_restore(flags);                                 \
        }                                                                     \
                                                                              \
-       smp_llsc_mb();                                                        \
+       return result;                                                        \
+}
+
+#define ATOMIC64_FETCH_OP(op, c_op, asm_op)                                  \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
+{                                                                            \
+       long result;                                                          \
+                                                                             \
+       if (kernel_uses_llsc && R10000_LLSC_WAR) {                            \
+               long temp;                                                    \
+                                                                             \
+               __asm__ __volatile__(                                         \
+               "       .set    arch=r4000                              \n"   \
+               "1:     lld     %1, %2          # atomic64_fetch_" #op "\n"   \
+               "       " #asm_op " %0, %1, %3                          \n"   \
+               "       scd     %0, %2                                  \n"   \
+               "       beqzl   %0, 1b                                  \n"   \
+               "       move    %0, %1                                  \n"   \
+               "       .set    mips0                                   \n"   \
+               : "=&r" (result), "=&r" (temp),                               \
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
+               : "Ir" (i));                                                  \
+       } else if (kernel_uses_llsc) {                                        \
+               long temp;                                                    \
+                                                                             \
+               do {                                                          \
+                       __asm__ __volatile__(                                 \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
+                       "       lld     %1, %2  # atomic64_fetch_" #op "\n"   \
+                       "       " #asm_op " %0, %1, %3                  \n"   \
+                       "       scd     %0, %2                          \n"   \
+                       "       .set    mips0                           \n"   \
+                       : "=&r" (result), "=&r" (temp),                       \
+                         "+" GCC_OFF_SMALL_ASM() (v->counter)                \
+                       : "Ir" (i));                                          \
+                                                                             \
+               } while (unlikely(!result));                                  \
+                                                                             \
+               result = temp;                                                \
+       } else {                                                              \
+               unsigned long flags;                                          \
+                                                                             \
+               raw_local_irq_save(flags);                                    \
+               result = v->counter;                                          \
+               v->counter c_op i;                                            \
+               raw_local_irq_restore(flags);                                 \
+       }                                                                     \
                                                                              \
        return result;                                                        \
 }
 
 #define ATOMIC64_OPS(op, c_op, asm_op)                                       \
        ATOMIC64_OP(op, c_op, asm_op)                                         \
-       ATOMIC64_OP_RETURN(op, c_op, asm_op)
+       ATOMIC64_OP_RETURN(op, c_op, asm_op)                                  \
+       ATOMIC64_FETCH_OP(op, c_op, asm_op)
 
 ATOMIC64_OPS(add, +=, daddu)
 ATOMIC64_OPS(sub, -=, dsubu)
-ATOMIC64_OP(and, &=, and)
-ATOMIC64_OP(or, |=, or)
-ATOMIC64_OP(xor, ^=, xor)
+
+#define atomic64_add_return_relaxed    atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed    atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed     atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed     atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op, asm_op)                                       \
+       ATOMIC64_OP(op, c_op, asm_op)                                         \
+       ATOMIC64_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC64_OPS(and, &=, and)
+ATOMIC64_OPS(or, |=, or)
+ATOMIC64_OPS(xor, ^=, xor)
+
+#define atomic64_fetch_and_relaxed     atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed      atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed     atomic64_fetch_xor_relaxed
 
 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
index f53816744d60e1a28b6b1c9687fd5f4dce0048b2..7d44e888134f9621d8056ac56fdf6916394619b7 100644 (file)
@@ -633,7 +633,7 @@ static inline struct page *pmd_page(pmd_t pmd)
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-       pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) |
+       pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
                       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
        return pmd;
 }
index 40196bebe849a0c825cc8a61e40b03440d14f31e..f485afe51514765710eaf990b674e92378effe61 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/compiler.h>
 
 #include <asm/barrier.h>
+#include <asm/processor.h>
 #include <asm/compiler.h>
 #include <asm/war.h>
 
@@ -48,8 +49,22 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 }
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-       while (arch_spin_is_locked(x)) { cpu_relax(); }
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       u16 owner = READ_ONCE(lock->h.serving_now);
+       smp_rmb();
+       for (;;) {
+               arch_spinlock_t tmp = READ_ONCE(*lock);
+
+               if (tmp.h.serving_now == tmp.h.ticket ||
+                   tmp.h.serving_now != owner)
+                       break;
+
+               cpu_relax();
+       }
+       smp_acquire__after_ctrl_dep();
+}
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
index 3ad0b0794f7d09d5953bc26d59a27c717792ea6b..f24eee04e16af6df0a73266d9f346de9636b33d7 100644 (file)
@@ -117,11 +117,13 @@ static int systick_set_oneshot(struct clock_event_device *evt)
        return 0;
 }
 
-static void __init ralink_systick_init(struct device_node *np)
+static int __init ralink_systick_init(struct device_node *np)
 {
+       int ret;
+
        systick.membase = of_iomap(np, 0);
        if (!systick.membase)
-               return;
+               return -ENXIO;
 
        systick_irqaction.name = np->name;
        systick.dev.name = np->name;
@@ -131,16 +133,21 @@ static void __init ralink_systick_init(struct device_node *np)
        systick.dev.irq = irq_of_parse_and_map(np, 0);
        if (!systick.dev.irq) {
                pr_err("%s: request_irq failed", np->name);
-               return;
+               return -EINVAL;
        }
 
-       clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
-                       SYSTICK_FREQ, 301, 16, clocksource_mmio_readl_up);
+       ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+                                   SYSTICK_FREQ, 301, 16,
+                                   clocksource_mmio_readl_up);
+       if (ret)
+               return ret;
 
        clockevents_register_device(&systick.dev);
 
        pr_info("%s: running - mult: %d, shift: %d\n",
                        np->name, systick.dev.mult, systick.dev.shift);
+
+       return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
index ce318d5ab23b06484e063d59e564bef6a2673700..36389efd45e8a2e97ae65d6f3630df5b7621dc35 100644 (file)
@@ -84,16 +84,41 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        return retval;                                                  \
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                            \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       int retval, status;                                             \
+                                                                       \
+       asm volatile(                                                   \
+               "1:     mov     %4,(_AAR,%3)    \n"                     \
+               "       mov     (_ADR,%3),%1    \n"                     \
+               "       mov     %1,%0           \n"                     \
+               "       " #op " %5,%0           \n"                     \
+               "       mov     %0,(_ADR,%3)    \n"                     \
+               "       mov     (_ADR,%3),%0    \n"     /* flush */     \
+               "       mov     (_ASR,%3),%0    \n"                     \
+               "       or      %0,%0           \n"                     \
+               "       bne     1b              \n"                     \
+               : "=&r"(status), "=&r"(retval), "=m"(v->counter)        \
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)   \
+               : "memory", "cc");                                      \
+       return retval;                                                  \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index 1ae580f3893344fe7ad99c71b4c5fa19c0692870..9c7b8f7942d8e9aadf3008875774b55a37959ae9 100644 (file)
@@ -12,6 +12,8 @@
 #define _ASM_SPINLOCK_H
 
 #include <linux/atomic.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 #include <asm/rwlock.h>
 #include <asm/page.h>
 
  */
 
 #define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
-#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->slock, !VAL);
+}
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
index e835dda2bfe2d5342f0b87f763476ebd9ae81875..d9563ddb337eab4e44d052ebd285206b09788f13 100644 (file)
@@ -206,15 +206,21 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void __init nios2_timer_get_base_and_freq(struct device_node *np,
+static int __init nios2_timer_get_base_and_freq(struct device_node *np,
                                void __iomem **base, u32 *freq)
 {
        *base = of_iomap(np, 0);
-       if (!*base)
-               panic("Unable to map reg for %s\n", np->name);
+       if (!*base) {
+               pr_crit("Unable to map reg for %s\n", np->name);
+               return -ENXIO;
+       }
+
+       if (of_property_read_u32(np, "clock-frequency", freq)) {
+               pr_crit("Unable to get %s clock frequency\n", np->name);
+               return -EINVAL;
+       }
 
-       if (of_property_read_u32(np, "clock-frequency", freq))
-               panic("Unable to get %s clock frequency\n", np->name);
+       return 0;
 }
 
 static struct nios2_clockevent_dev nios2_ce = {
@@ -231,17 +237,21 @@ static struct nios2_clockevent_dev nios2_ce = {
        },
 };
 
-static __init void nios2_clockevent_init(struct device_node *timer)
+static __init int nios2_clockevent_init(struct device_node *timer)
 {
        void __iomem *iobase;
        u32 freq;
-       int irq;
+       int irq, ret;
 
-       nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+       ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+       if (ret)
+               return ret;
 
        irq = irq_of_parse_and_map(timer, 0);
-       if (!irq)
-               panic("Unable to parse timer irq\n");
+       if (!irq) {
+               pr_crit("Unable to parse timer irq\n");
+               return -EINVAL;
+       }
 
        nios2_ce.timer.base = iobase;
        nios2_ce.timer.freq = freq;
@@ -253,25 +263,35 @@ static __init void nios2_clockevent_init(struct device_node *timer)
        /* clear pending interrupt */
        timer_writew(&nios2_ce.timer, 0, ALTERA_TIMER_STATUS_REG);
 
-       if (request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
-               &nios2_ce.ced))
-               panic("Unable to setup timer irq\n");
+       ret = request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
+                         &nios2_ce.ced);
+       if (ret) {
+               pr_crit("Unable to setup timer irq\n");
+               return ret;
+       }
 
        clockevents_config_and_register(&nios2_ce.ced, freq, 1, ULONG_MAX);
+
+       return 0;
 }
 
-static __init void nios2_clocksource_init(struct device_node *timer)
+static __init int nios2_clocksource_init(struct device_node *timer)
 {
        unsigned int ctrl;
        void __iomem *iobase;
        u32 freq;
+       int ret;
 
-       nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+       ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+       if (ret)
+               return ret;
 
        nios2_cs.timer.base = iobase;
        nios2_cs.timer.freq = freq;
 
-       clocksource_register_hz(&nios2_cs.cs, freq);
+       ret = clocksource_register_hz(&nios2_cs.cs, freq);
+       if (ret)
+               return ret;
 
        timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODL_REG);
        timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODH_REG);
@@ -282,6 +302,8 @@ static __init void nios2_clocksource_init(struct device_node *timer)
 
        /* Calibrate the delay loop directly */
        lpj_fine = freq / HZ;
+
+       return 0;
 }
 
 /*
@@ -289,22 +311,25 @@ static __init void nios2_clocksource_init(struct device_node *timer)
  * more instances, the second one gets used as clocksource and all
  * others are unused.
 */
-static void __init nios2_time_init(struct device_node *timer)
+static int __init nios2_time_init(struct device_node *timer)
 {
        static int num_called;
        int ret = 0;
 
        switch (num_called) {
        case 0:
-               nios2_clockevent_init(timer);
+               ret = nios2_clockevent_init(timer);
                break;
        case 1:
-               nios2_clocksource_init(timer);
+               ret = nios2_clocksource_init(timer);
                break;
        default:
                break;
        }
 
        num_called++;
+
+       return ret;
 }
 
 void read_persistent_clock(struct timespec *ts)
index 1d109990a02242aafb76c19da65a2f74d34367b9..5394b9c5f914fdc4339224a3950e9d1c4ccd6bbb 100644 (file)
@@ -121,16 +121,39 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v)            \
        return ret;                                                     \
 }
 
-#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+#define ATOMIC_FETCH_OP(op, c_op)                                      \
+static __inline__ int atomic_fetch_##op(int i, atomic_t *v)            \
+{                                                                      \
+       unsigned long flags;                                            \
+       int ret;                                                        \
+                                                                       \
+       _atomic_spin_lock_irqsave(v, flags);                            \
+       ret = v->counter;                                               \
+       v->counter c_op i;                                              \
+       _atomic_spin_unlock_irqrestore(v, flags);                       \
+                                                                       \
+       return ret;                                                     \
+}
+
+#define ATOMIC_OPS(op, c_op)                                           \
+       ATOMIC_OP(op, c_op)                                             \
+       ATOMIC_OP_RETURN(op, c_op)                                      \
+       ATOMIC_FETCH_OP(op, c_op)
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
 
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op)                                           \
+       ATOMIC_OP(op, c_op)                                             \
+       ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
@@ -185,15 +208,39 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)        \
        return ret;                                                     \
 }
 
-#define ATOMIC64_OPS(op, c_op) ATOMIC64_OP(op, c_op) ATOMIC64_OP_RETURN(op, c_op)
+#define ATOMIC64_FETCH_OP(op, c_op)                                    \
+static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)                \
+{                                                                      \
+       unsigned long flags;                                            \
+       s64 ret;                                                        \
+                                                                       \
+       _atomic_spin_lock_irqsave(v, flags);                            \
+       ret = v->counter;                                               \
+       v->counter c_op i;                                              \
+       _atomic_spin_unlock_irqrestore(v, flags);                       \
+                                                                       \
+       return ret;                                                     \
+}
+
+#define ATOMIC64_OPS(op, c_op)                                         \
+       ATOMIC64_OP(op, c_op)                                           \
+       ATOMIC64_OP_RETURN(op, c_op)                                    \
+       ATOMIC64_FETCH_OP(op, c_op)
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
-ATOMIC64_OP(and, &=)
-ATOMIC64_OP(or, |=)
-ATOMIC64_OP(xor, ^=)
 
 #undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op)                                         \
+       ATOMIC64_OP(op, c_op)                                           \
+       ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
index 64f2992e439fcd81811e7a44d322bdc5aafd9ff1..e32936cd7f1017a418bf1f086cfc819640616260 100644 (file)
@@ -13,8 +13,13 @@ static inline int arch_spin_is_locked(arch_spinlock_t *x)
 }
 
 #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
-#define arch_spin_unlock_wait(x) \
-               do { cpu_relax(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *x)
+{
+       volatile unsigned int *a = __ldcw_align(x);
+
+       smp_cond_load_acquire(a, VAL);
+}
 
 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                         unsigned long flags)
index ae0751ef8788fd55f7c4d6b37a8fbb73d82d317f..f08d567e0ca4dfb75d560afa6b72473643f12c56 100644 (file)
@@ -78,21 +78,53 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)  \
        return t;                                                       \
 }
 
+#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)                            \
+static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)      \
+{                                                                      \
+       int res, t;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+"1:    lwarx   %0,0,%4         # atomic_fetch_" #op "_relaxed\n"       \
+       #asm_op " %1,%3,%0\n"                                           \
+       PPC405_ERR77(0, %4)                                             \
+"      stwcx.  %1,0,%4\n"                                              \
+"      bne-    1b\n"                                                   \
+       : "=&r" (res), "=&r" (t), "+m" (v->counter)                     \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc");                                                        \
+                                                                       \
+       return res;                                                     \
+}
+
 #define ATOMIC_OPS(op, asm_op)                                         \
        ATOMIC_OP(op, asm_op)                                           \
-       ATOMIC_OP_RETURN_RELAXED(op, asm_op)
+       ATOMIC_OP_RETURN_RELAXED(op, asm_op)                            \
+       ATOMIC_FETCH_OP_RELAXED(op, asm_op)
 
 ATOMIC_OPS(add, add)
 ATOMIC_OPS(sub, subf)
 
-ATOMIC_OP(and, and)
-ATOMIC_OP(or, or)
-ATOMIC_OP(xor, xor)
-
 #define atomic_add_return_relaxed atomic_add_return_relaxed
 #define atomic_sub_return_relaxed atomic_sub_return_relaxed
 
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, asm_op)                                         \
+       ATOMIC_OP(op, asm_op)                                           \
+       ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed  atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP_RELAXED
 #undef ATOMIC_OP_RETURN_RELAXED
 #undef ATOMIC_OP
 
@@ -329,20 +361,53 @@ atomic64_##op##_return_relaxed(long a, atomic64_t *v)                     \
        return t;                                                       \
 }
 
+#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)                          \
+static inline long                                                     \
+atomic64_fetch_##op##_relaxed(long a, atomic64_t *v)                   \
+{                                                                      \
+       long res, t;                                                    \
+                                                                       \
+       __asm__ __volatile__(                                           \
+"1:    ldarx   %0,0,%4         # atomic64_fetch_" #op "_relaxed\n"     \
+       #asm_op " %1,%3,%0\n"                                           \
+"      stdcx.  %1,0,%4\n"                                              \
+"      bne-    1b\n"                                                   \
+       : "=&r" (res), "=&r" (t), "+m" (v->counter)                     \
+       : "r" (a), "r" (&v->counter)                                    \
+       : "cc");                                                        \
+                                                                       \
+       return res;                                                     \
+}
+
 #define ATOMIC64_OPS(op, asm_op)                                       \
        ATOMIC64_OP(op, asm_op)                                         \
-       ATOMIC64_OP_RETURN_RELAXED(op, asm_op)
+       ATOMIC64_OP_RETURN_RELAXED(op, asm_op)                          \
+       ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
 
 ATOMIC64_OPS(add, add)
 ATOMIC64_OPS(sub, subf)
-ATOMIC64_OP(and, and)
-ATOMIC64_OP(or, or)
-ATOMIC64_OP(xor, xor)
 
 #define atomic64_add_return_relaxed atomic64_add_return_relaxed
 #define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
 
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, asm_op)                                       \
+       ATOMIC64_OP(op, asm_op)                                         \
+       ATOMIC64_FETCH_OP_RELAXED(op, asm_op)
+
+ATOMIC64_OPS(and, and)
+ATOMIC64_OPS(or, or)
+ATOMIC64_OPS(xor, xor)
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed  atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+
 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP_RELAXED
 #undef ATOMIC64_OP_RETURN_RELAXED
 #undef ATOMIC64_OP
 
index 127ab23e1f6ccdbe038b6cfdd84c569742968e27..078155fa118992f1f456d9643786f7ccf7d19741 100644 (file)
@@ -124,7 +124,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-       if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
+       if (likely(atomic_read(count) == 1 && __mutex_cmpxchg_lock(count, 1, 0) == 1))
                return 1;
        return 0;
 }
index 911064aa59b2f25c5c1dc7a7ac846a560ea99b1e..d28cc2f5b7b2c706fe46ea8c93b5d59fa4ebc263 100644 (file)
@@ -93,6 +93,11 @@ static inline int atomic_add_return(int i, atomic_t *v)
        return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
 }
 
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+       return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER);
+}
+
 static inline void atomic_add(int i, atomic_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -114,22 +119,27 @@ static inline void atomic_add(int i, atomic_t *v)
 #define atomic_inc_and_test(_v)                (atomic_add_return(1, _v) == 0)
 #define atomic_sub(_i, _v)             atomic_add(-(int)(_i), _v)
 #define atomic_sub_return(_i, _v)      atomic_add_return(-(int)(_i), _v)
+#define atomic_fetch_sub(_i, _v)       atomic_fetch_add(-(int)(_i), _v)
 #define atomic_sub_and_test(_i, _v)    (atomic_sub_return(_i, _v) == 0)
 #define atomic_dec(_v)                 atomic_sub(1, _v)
 #define atomic_dec_return(_v)          atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)                (atomic_sub_return(1, _v) == 0)
 
-#define ATOMIC_OP(op, OP)                                              \
+#define ATOMIC_OPS(op, OP)                                             \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
        __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);        \
+}                                                                      \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       return __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_BARRIER);    \
 }
 
-ATOMIC_OP(and, AND)
-ATOMIC_OP(or, OR)
-ATOMIC_OP(xor, XOR)
+ATOMIC_OPS(and, AND)
+ATOMIC_OPS(or, OR)
+ATOMIC_OPS(xor, XOR)
 
-#undef ATOMIC_OP
+#undef ATOMIC_OPS
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
@@ -236,6 +246,11 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
        return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
 }
 
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+       return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER);
+}
+
 static inline void atomic64_add(long long i, atomic64_t *v)
 {
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
@@ -264,17 +279,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
        return old;
 }
 
-#define ATOMIC64_OP(op, OP)                                            \
+#define ATOMIC64_OPS(op, OP)                                           \
 static inline void atomic64_##op(long i, atomic64_t *v)                        \
 {                                                                      \
        __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);  \
+}                                                                      \
+static inline long atomic64_fetch_##op(long i, atomic64_t *v)          \
+{                                                                      \
+       return __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_BARRIER); \
 }
 
-ATOMIC64_OP(and, AND)
-ATOMIC64_OP(or, OR)
-ATOMIC64_OP(xor, XOR)
+ATOMIC64_OPS(and, AND)
+ATOMIC64_OPS(or, OR)
+ATOMIC64_OPS(xor, XOR)
 
-#undef ATOMIC64_OP
+#undef ATOMIC64_OPS
 #undef __ATOMIC64_LOOP
 
 static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
@@ -315,6 +334,7 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
 #define atomic64_inc_return(_v)                atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)      (atomic64_add_return(1, _v) == 0)
 #define atomic64_sub_return(_i, _v)    atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_fetch_sub(_i, _v)     atomic64_fetch_add(-(long long)(_i), _v)
 #define atomic64_sub(_i, _v)           atomic64_add(-(long long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)  (atomic64_sub_return(_i, _v) == 0)
 #define atomic64_dec(_v)               atomic64_sub(1, _v)
index c75e4471e618826a385c6aef2955b79c757ab9e7..597e7e96b59e8b9bb2948d9ae43799641abcc375 100644 (file)
@@ -207,41 +207,4 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                rwsem_downgrade_wake(sem);
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-       signed long old, new;
-
-       asm volatile(
-               "       lg      %0,%2\n"
-               "0:     lgr     %1,%0\n"
-               "       agr     %1,%4\n"
-               "       csg     %0,%1,%2\n"
-               "       jl      0b"
-               : "=&d" (old), "=&d" (new), "=Q" (sem->count)
-               : "Q" (sem->count), "d" (delta)
-               : "cc", "memory");
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-       signed long old, new;
-
-       asm volatile(
-               "       lg      %0,%2\n"
-               "0:     lgr     %1,%0\n"
-               "       agr     %1,%4\n"
-               "       csg     %0,%1,%2\n"
-               "       jl      0b"
-               : "=&d" (old), "=&d" (new), "=Q" (sem->count)
-               : "Q" (sem->count), "d" (delta)
-               : "cc", "memory");
-       return new;
-}
-
 #endif /* _S390_RWSEM_H */
index 63ebf37d31438a647b8d38177b9cb107995e3e99..7e9e09f600fa5932849948de3bc4f66ea871cba2 100644 (file)
@@ -10,6 +10,8 @@
 #define __ASM_SPINLOCK_H
 
 #include <linux/smp.h>
+#include <asm/barrier.h>
+#include <asm/processor.h>
 
 #define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)
 
@@ -97,6 +99,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
        while (arch_spin_is_locked(lock))
                arch_spin_relax(lock);
+       smp_acquire__after_ctrl_dep();
 }
 
 /*
index b94df40e5f2d2b701fdb7c0858b06ab745f6b65e..d755e96c3064c132317eb2357b36fc106f1b68b8 100644 (file)
@@ -43,16 +43,42 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        return tmp;                                                     \
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                            \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       int res, tmp;                                                   \
+                                                                       \
+       __asm__ __volatile__ (                                          \
+               "   .align 2              \n\t"                         \
+               "   mova    1f,   r0      \n\t" /* r0 = end point */    \
+               "   mov    r15,   r1      \n\t" /* r1 = saved sp */     \
+               "   mov    #-6,   r15     \n\t" /* LOGIN: r15 = size */ \
+               "   mov.l  @%2,   %0      \n\t" /* load old value */    \
+               "   mov     %0,   %1      \n\t" /* save old value */    \
+               " " #op "   %3,   %0      \n\t" /* $op */               \
+               "   mov.l   %0,   @%2     \n\t" /* store new value */   \
+               "1: mov     r1,   r15     \n\t" /* LOGOUT */            \
+               : "=&r" (tmp), "=&r" (res), "+r"  (v)                   \
+               : "r"   (i)                                             \
+               : "memory" , "r0", "r1");                               \
+                                                                       \
+       return res;                                                     \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index 23fcdad5773eea21cd58715dd76ed974fcdf15ee..8e2da5fa017830364296d4ac16773ee20dc1c4fe 100644 (file)
@@ -33,15 +33,38 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        return temp;                                                    \
 }
 
-#define ATOMIC_OPS(op, c_op) ATOMIC_OP(op, c_op) ATOMIC_OP_RETURN(op, c_op)
+#define ATOMIC_FETCH_OP(op, c_op)                                      \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       unsigned long temp, flags;                                      \
+                                                                       \
+       raw_local_irq_save(flags);                                      \
+       temp = v->counter;                                              \
+       v->counter c_op i;                                              \
+       raw_local_irq_restore(flags);                                   \
+                                                                       \
+       return temp;                                                    \
+}
+
+#define ATOMIC_OPS(op, c_op)                                           \
+       ATOMIC_OP(op, c_op)                                             \
+       ATOMIC_OP_RETURN(op, c_op)                                      \
+       ATOMIC_FETCH_OP(op, c_op)
 
 ATOMIC_OPS(add, +=)
 ATOMIC_OPS(sub, -=)
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
 
 #undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op)                                           \
+       ATOMIC_OP(op, c_op)                                             \
+       ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &=)
+ATOMIC_OPS(or, |=)
+ATOMIC_OPS(xor, ^=)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index 33d34b16d4d67bb15ed129d1233c110640de9d7b..caea2c45f6c2ac43ee6c134c1b4c9a7ebc8295e3 100644 (file)
@@ -48,15 +48,39 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        return temp;                                                    \
 }
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                            \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       unsigned long res, temp;                                        \
+                                                                       \
+       __asm__ __volatile__ (                                          \
+"1:    movli.l @%3, %0         ! atomic_fetch_" #op "  \n"             \
+"      mov %0, %1                                      \n"             \
+"      " #op " %2, %0                                  \n"             \
+"      movco.l %0, @%3                                 \n"             \
+"      bf      1b                                      \n"             \
+"      synco                                           \n"             \
+       : "=&z" (temp), "=&z" (res)                                     \
+       : "r" (i), "r" (&v->counter)                                    \
+       : "t");                                                         \
+                                                                       \
+       return res;                                                     \
+}
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index bdc0f3b6c56afdf23d6b628128e6171cc2552b74..416834b60ad0cce9584e296a110f35e6c8481f23 100644 (file)
 #error "Need movli.l/movco.l for spinlocks"
 #endif
 
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
 
 #define arch_spin_is_locked(x)         ((x)->lock <= 0)
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
-       do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->lock, VAL > 0);
+}
 
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
index 7dcbebbcaec6f0c742f941e580bc6564b1158c13..ee3f11c43cdaa2ef6ab6fe444130e8dd2a8a5541 100644 (file)
 #define ATOMIC_INIT(i)  { (i) }
 
 int atomic_add_return(int, atomic_t *);
-void atomic_and(int, atomic_t *);
-void atomic_or(int, atomic_t *);
-void atomic_xor(int, atomic_t *);
+int atomic_fetch_add(int, atomic_t *);
+int atomic_fetch_and(int, atomic_t *);
+int atomic_fetch_or(int, atomic_t *);
+int atomic_fetch_xor(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 int atomic_xchg(atomic_t *, int);
 int __atomic_add_unless(atomic_t *, int, int);
@@ -35,7 +36,13 @@ void atomic_set(atomic_t *, int);
 #define atomic_inc(v)          ((void)atomic_add_return(        1, (v)))
 #define atomic_dec(v)          ((void)atomic_add_return(       -1, (v)))
 
+#define atomic_and(i, v)       ((void)atomic_fetch_and((i), (v)))
+#define atomic_or(i, v)                ((void)atomic_fetch_or((i), (v)))
+#define atomic_xor(i, v)       ((void)atomic_fetch_xor((i), (v)))
+
 #define atomic_sub_return(i, v)        (atomic_add_return(-(int)(i), (v)))
+#define atomic_fetch_sub(i, v) (atomic_fetch_add(-(int)(i), (v)))
+
 #define atomic_inc_return(v)   (atomic_add_return(        1, (v)))
 #define atomic_dec_return(v)   (atomic_add_return(       -1, (v)))
 
index f2fbf9e16fafca66c4aa01145645f1748775cde2..24827a3f733a9b23e74509a16089a1790c8a16e8 100644 (file)
@@ -28,16 +28,24 @@ void atomic64_##op(long, atomic64_t *);
 int atomic_##op##_return(int, atomic_t *);                             \
 long atomic64_##op##_return(long, atomic64_t *);
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                            \
+int atomic_fetch_##op(int, atomic_t *);                                        \
+long atomic64_fetch_##op(long, atomic64_t *);
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index bcc98fc35281c73bed1ae1e35cb4d56aade44794..d9c5876c61215494df0238992da09c03f5d82211 100644 (file)
@@ -9,12 +9,15 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/psr.h>
+#include <asm/barrier.h>
 #include <asm/processor.h> /* for cpu_relax */
 
 #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
 
-#define arch_spin_unlock_wait(lock) \
-       do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
index 9689176949781fc5ae8985a794def4521e6ba72b..87990b7c6b0d693eb4c715f33047e5d8cfa0e5c6 100644 (file)
@@ -8,6 +8,9 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
 /* To get debugging spinlocks which detect and catch
  * deadlock situations, set CONFIG_DEBUG_SPINLOCK
  * and rebuild your kernel.
 
 #define arch_spin_is_locked(lp)        ((lp)->lock != 0)
 
-#define arch_spin_unlock_wait(lp)      \
-       do {    rmb();                  \
-       } while((lp)->lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->lock, !VAL);
+}
 
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
index b9d63c0a7aabc2c726ff54c617f99bdb8b3ca74a..2c373329d5cb8dc8dc87c934feb8dbf64ecf9ce6 100644 (file)
@@ -27,39 +27,44 @@ static DEFINE_SPINLOCK(dummy);
 
 #endif /* SMP */
 
-#define ATOMIC_OP_RETURN(op, c_op)                                     \
-int atomic_##op##_return(int i, atomic_t *v)                           \
+#define ATOMIC_FETCH_OP(op, c_op)                                      \
+int atomic_fetch_##op(int i, atomic_t *v)                              \
 {                                                                      \
        int ret;                                                        \
        unsigned long flags;                                            \
        spin_lock_irqsave(ATOMIC_HASH(v), flags);                       \
                                                                        \
-       ret = (v->counter c_op i);                                      \
+       ret = v->counter;                                               \
+       v->counter c_op i;                                              \
                                                                        \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);                  \
        return ret;                                                     \
 }                                                                      \
-EXPORT_SYMBOL(atomic_##op##_return);
+EXPORT_SYMBOL(atomic_fetch_##op);
 
-#define ATOMIC_OP(op, c_op)                                            \
-void atomic_##op(int i, atomic_t *v)                                   \
+#define ATOMIC_OP_RETURN(op, c_op)                                     \
+int atomic_##op##_return(int i, atomic_t *v)                           \
 {                                                                      \
+       int ret;                                                        \
        unsigned long flags;                                            \
        spin_lock_irqsave(ATOMIC_HASH(v), flags);                       \
                                                                        \
-       v->counter c_op i;                                              \
+       ret = (v->counter c_op i);                                      \
                                                                        \
        spin_unlock_irqrestore(ATOMIC_HASH(v), flags);                  \
+       return ret;                                                     \
 }                                                                      \
-EXPORT_SYMBOL(atomic_##op);
+EXPORT_SYMBOL(atomic_##op##_return);
 
 ATOMIC_OP_RETURN(add, +=)
-ATOMIC_OP(and, &=)
-ATOMIC_OP(or, |=)
-ATOMIC_OP(xor, ^=)
 
+ATOMIC_FETCH_OP(add, +=)
+ATOMIC_FETCH_OP(and, &=)
+ATOMIC_FETCH_OP(or, |=)
+ATOMIC_FETCH_OP(xor, ^=)
+
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
 
 int atomic_xchg(atomic_t *v, int new)
 {
index d6b0363f345bfbd41aeb4a7a287ee651404f2259..a5c5a0279cccc7b3487e9361500cf8162a3c89ad 100644 (file)
@@ -9,10 +9,11 @@
 
        .text
 
-       /* Two versions of the atomic routines, one that
+       /* Three versions of the atomic routines, one that
         * does not return a value and does not perform
-        * memory barriers, and a second which returns
-        * a value and does the barriers.
+        * memory barriers, and two which return
+        * a value — the new and the old value, respectively —
+        * and do the barriers.
         */
 
 #define ATOMIC_OP(op)                                                  \
@@ -43,15 +44,34 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
 ENDPROC(atomic_##op##_return);
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                            \
+ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */       \
+       BACKOFF_SETUP(%o2);                                             \
+1:     lduw    [%o1], %g1;                                             \
+       op      %g1, %o0, %g7;                                          \
+       cas     [%o1], %g1, %g7;                                        \
+       cmp     %g1, %g7;                                               \
+       bne,pn  %icc, BACKOFF_LABEL(2f, 1b);                            \
+        nop;                                                           \
+       retl;                                                           \
+        sra    %g1, 0, %o0;                                            \
+2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
+ENDPROC(atomic_fetch_##op);
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
@@ -83,15 +103,34 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */      \
 2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
 ENDPROC(atomic64_##op##_return);
 
-#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+#define ATOMIC64_FETCH_OP(op)                                          \
+ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */     \
+       BACKOFF_SETUP(%o2);                                             \
+1:     ldx     [%o1], %g1;                                             \
+       op      %g1, %o0, %g7;                                          \
+       casx    [%o1], %g1, %g7;                                        \
+       cmp     %g1, %g7;                                               \
+       bne,pn  %xcc, BACKOFF_LABEL(2f, 1b);                            \
+        nop;                                                           \
+       retl;                                                           \
+        mov    %g1, %o0;                                               \
+2:     BACKOFF_SPIN(%o2, %o3, 1b);                                     \
+ENDPROC(atomic64_fetch_##op);
+
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
 
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
 
 #undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
+
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
index 8eb454cfe05c9f17a3d084306b085f6dde095ee3..de5e97817bdb6be31fd95f63030112035b766b27 100644 (file)
@@ -107,15 +107,24 @@ EXPORT_SYMBOL(atomic64_##op);
 EXPORT_SYMBOL(atomic_##op##_return);                                   \
 EXPORT_SYMBOL(atomic64_##op##_return);
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_FETCH_OP(op)                                            \
+EXPORT_SYMBOL(atomic_fetch_##op);                                      \
+EXPORT_SYMBOL(atomic64_fetch_##op);
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
 
 #undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index 9fc0107a9c5e557f226b62da9a5e086a4f7b4272..8dda3c8ff5ab5f4b0930af466b068a23b7ffcd04 100644 (file)
@@ -46,6 +46,8 @@ static inline int atomic_read(const atomic_t *v)
  */
 #define atomic_sub_return(i, v)                atomic_add_return((int)(-(i)), (v))
 
+#define atomic_fetch_sub(i, v)         atomic_fetch_add(-(int)(i), (v))
+
 /**
  * atomic_sub - subtract integer from atomic variable
  * @i: integer value to subtract
index d320ce253d8630d1dee48a366de0cf3b7823c0ab..a93774255136d4c250065407d2c295ac7f2088e7 100644 (file)
@@ -34,18 +34,29 @@ static inline void atomic_add(int i, atomic_t *v)
        _atomic_xchg_add(&v->counter, i);
 }
 
-#define ATOMIC_OP(op)                                                  \
-unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+#define ATOMIC_OPS(op)                                                 \
+unsigned long _atomic_fetch_##op(volatile unsigned long *p, unsigned long mask); \
 static inline void atomic_##op(int i, atomic_t *v)                     \
 {                                                                      \
-       _atomic_##op((unsigned long *)&v->counter, i);                  \
+       _atomic_fetch_##op((unsigned long *)&v->counter, i);            \
+}                                                                      \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       smp_mb();                                                       \
+       return _atomic_fetch_##op((unsigned long *)&v->counter, i);     \
 }
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
-#undef ATOMIC_OP
+#undef ATOMIC_OPS
+
+static inline int atomic_fetch_add(int i, atomic_t *v)
+{
+       smp_mb();
+       return _atomic_xchg_add(&v->counter, i);
+}
 
 /**
  * atomic_add_return - add integer and return
@@ -126,16 +137,29 @@ static inline void atomic64_add(long long i, atomic64_t *v)
        _atomic64_xchg_add(&v->counter, i);
 }
 
-#define ATOMIC64_OP(op)                                                \
-long long _atomic64_##op(long long *v, long long n);           \
+#define ATOMIC64_OPS(op)                                       \
+long long _atomic64_fetch_##op(long long *v, long long n);     \
 static inline void atomic64_##op(long long i, atomic64_t *v)   \
 {                                                              \
-       _atomic64_##op(&v->counter, i);                         \
+       _atomic64_fetch_##op(&v->counter, i);                   \
+}                                                              \
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v)        \
+{                                                              \
+       smp_mb();                                               \
+       return _atomic64_fetch_##op(&v->counter, i);            \
 }
 
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
+
+#undef ATOMIC64_OPS
+
+static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
+{
+       smp_mb();
+       return _atomic64_xchg_add(&v->counter, i);
+}
 
 /**
  * atomic64_add_return - add integer and return
@@ -186,6 +210,7 @@ static inline void atomic64_set(atomic64_t *v, long long n)
 #define atomic64_inc_return(v)         atomic64_add_return(1LL, (v))
 #define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
 #define atomic64_sub_return(i, v)      atomic64_add_return(-(i), (v))
+#define atomic64_fetch_sub(i, v)       atomic64_fetch_add(-(i), (v))
 #define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_sub(i, v)             atomic64_add(-(i), (v))
 #define atomic64_dec(v)                        atomic64_sub(1LL, (v))
@@ -193,7 +218,6 @@ static inline void atomic64_set(atomic64_t *v, long long n)
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
 
-
 #endif /* !__ASSEMBLY__ */
 
 /*
@@ -242,16 +266,16 @@ struct __get_user {
        unsigned long val;
        int err;
 };
-extern struct __get_user __atomic_cmpxchg(volatile int *p,
+extern struct __get_user __atomic32_cmpxchg(volatile int *p,
                                          int *lock, int o, int n);
-extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
+extern struct __get_user __atomic32_xchg(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_xchg_add(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_xchg_add_unless(volatile int *p,
                                                  int *lock, int o, int n);
-extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
-extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_and(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_andn(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic32_fetch_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
                                        long long o, long long n);
 extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
@@ -259,9 +283,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
                                        long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
                                        int *lock, long long o, long long n);
-extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
-extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
-extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_fetch_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_fetch_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_fetch_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
index b0531a623653a702544c475c110e8d19cb7b10eb..4cefa0c9fd81303d30d65a27e0e60adc43a4860f 100644 (file)
  * on any routine which updates memory and returns a value.
  */
 
-static inline void atomic_add(int i, atomic_t *v)
-{
-       __insn_fetchadd4((void *)&v->counter, i);
-}
-
 /*
  * Note a subtlety of the locking here.  We are required to provide a
  * full memory barrier before and after the operation.  However, we
@@ -59,28 +54,39 @@ static inline int atomic_add_return(int i, atomic_t *v)
        return val;
 }
 
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+#define ATOMIC_OPS(op)                                                 \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       int val;                                                        \
+       smp_mb();                                                       \
+       val = __insn_fetch##op##4((void *)&v->counter, i);              \
+       smp_mb();                                                       \
+       return val;                                                     \
+}                                                                      \
+static inline void atomic_##op(int i, atomic_t *v)                     \
+{                                                                      \
+       __insn_fetch##op##4((void *)&v->counter, i);                    \
+}
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+
+#undef ATOMIC_OPS
+
+static inline int atomic_fetch_xor(int i, atomic_t *v)
 {
        int guess, oldval = v->counter;
+       smp_mb();
        do {
-               if (oldval == u)
-                       break;
                guess = oldval;
-               oldval = cmpxchg(&v->counter, guess, guess + a);
+               __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+               oldval = __insn_cmpexch4(&v->counter, guess ^ i);
        } while (guess != oldval);
+       smp_mb();
        return oldval;
 }
 
-static inline void atomic_and(int i, atomic_t *v)
-{
-       __insn_fetchand4((void *)&v->counter, i);
-}
-
-static inline void atomic_or(int i, atomic_t *v)
-{
-       __insn_fetchor4((void *)&v->counter, i);
-}
-
 static inline void atomic_xor(int i, atomic_t *v)
 {
        int guess, oldval = v->counter;
@@ -91,6 +97,18 @@ static inline void atomic_xor(int i, atomic_t *v)
        } while (guess != oldval);
 }
 
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int guess, oldval = v->counter;
+       do {
+               if (oldval == u)
+                       break;
+               guess = oldval;
+               oldval = cmpxchg(&v->counter, guess, guess + a);
+       } while (guess != oldval);
+       return oldval;
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i)       { (i) }
@@ -98,11 +116,6 @@ static inline void atomic_xor(int i, atomic_t *v)
 #define atomic64_read(v)       READ_ONCE((v)->counter)
 #define atomic64_set(v, i)     WRITE_ONCE((v)->counter, (i))
 
-static inline void atomic64_add(long i, atomic64_t *v)
-{
-       __insn_fetchadd((void *)&v->counter, i);
-}
-
 static inline long atomic64_add_return(long i, atomic64_t *v)
 {
        int val;
@@ -112,26 +125,37 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
        return val;
 }
 
-static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+#define ATOMIC64_OPS(op)                                               \
+static inline long atomic64_fetch_##op(long i, atomic64_t *v)          \
+{                                                                      \
+       long val;                                                       \
+       smp_mb();                                                       \
+       val = __insn_fetch##op((void *)&v->counter, i);                 \
+       smp_mb();                                                       \
+       return val;                                                     \
+}                                                                      \
+static inline void atomic64_##op(long i, atomic64_t *v)                        \
+{                                                                      \
+       __insn_fetch##op((void *)&v->counter, i);                       \
+}
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+
+#undef ATOMIC64_OPS
+
+static inline long atomic64_fetch_xor(long i, atomic64_t *v)
 {
        long guess, oldval = v->counter;
+       smp_mb();
        do {
-               if (oldval == u)
-                       break;
                guess = oldval;
-               oldval = cmpxchg(&v->counter, guess, guess + a);
+               __insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+               oldval = __insn_cmpexch(&v->counter, guess ^ i);
        } while (guess != oldval);
-       return oldval != u;
-}
-
-static inline void atomic64_and(long i, atomic64_t *v)
-{
-       __insn_fetchand((void *)&v->counter, i);
-}
-
-static inline void atomic64_or(long i, atomic64_t *v)
-{
-       __insn_fetchor((void *)&v->counter, i);
+       smp_mb();
+       return oldval;
 }
 
 static inline void atomic64_xor(long i, atomic64_t *v)
@@ -144,7 +168,20 @@ static inline void atomic64_xor(long i, atomic64_t *v)
        } while (guess != oldval);
 }
 
+static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long guess, oldval = v->counter;
+       do {
+               if (oldval == u)
+                       break;
+               guess = oldval;
+               oldval = cmpxchg(&v->counter, guess, guess + a);
+       } while (guess != oldval);
+       return oldval != u;
+}
+
 #define atomic64_sub_return(i, v)      atomic64_add_return(-(i), (v))
+#define atomic64_fetch_sub(i, v)       atomic64_fetch_add(-(i), (v))
 #define atomic64_sub(i, v)             atomic64_add(-(i), (v))
 #define atomic64_inc_return(v)         atomic64_add_return(1, (v))
 #define atomic64_dec_return(v)         atomic64_sub_return(1, (v))
index d55222806c2f7d7ee96f135da21ec787d1123610..4c419ab95ab772d0b3190c9d93222348f647aae9 100644 (file)
@@ -87,6 +87,13 @@ mb_incoherent(void)
 #define __smp_mb__after_atomic()       __smp_mb()
 #endif
 
+/*
+ * The TILE architecture does not do speculative reads; a control
+ * dependency therefore already orders subsequent loads as well as stores
+ * (LOAD->{LOAD,STORE}), so we can forgo the additional RMB.
+ */
+#define smp_acquire__after_ctrl_dep()  barrier()
+
 #include <asm-generic/barrier.h>
 
 #endif /* !__ASSEMBLY__ */
index bbf7b666f21df85cd511fbdde71edf403041644d..d1406a95f6b7d9412b92f844da013cc73bd0121f 100644 (file)
@@ -19,9 +19,9 @@
 #include <asm/barrier.h>
 
 /* Tile-specific routines to support <asm/bitops.h>. */
-unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
-unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
-unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask);
+unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask);
 
 /**
  * set_bit - Atomically set a bit in memory
@@ -35,7 +35,7 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
  */
 static inline void set_bit(unsigned nr, volatile unsigned long *addr)
 {
-       _atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
+       _atomic_fetch_or(addr + BIT_WORD(nr), BIT_MASK(nr));
 }
 
 /**
@@ -54,7 +54,7 @@ static inline void set_bit(unsigned nr, volatile unsigned long *addr)
  */
 static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 {
-       _atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
+       _atomic_fetch_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
 }
 
 /**
@@ -69,7 +69,7 @@ static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
  */
 static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 {
-       _atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
+       _atomic_fetch_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
 }
 
 /**
@@ -85,7 +85,7 @@ static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
        unsigned long mask = BIT_MASK(nr);
        addr += BIT_WORD(nr);
        smp_mb();  /* barrier for proper semantics */
-       return (_atomic_or(addr, mask) & mask) != 0;
+       return (_atomic_fetch_or(addr, mask) & mask) != 0;
 }
 
 /**
@@ -101,7 +101,7 @@ static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
        unsigned long mask = BIT_MASK(nr);
        addr += BIT_WORD(nr);
        smp_mb();  /* barrier for proper semantics */
-       return (_atomic_andn(addr, mask) & mask) != 0;
+       return (_atomic_fetch_andn(addr, mask) & mask) != 0;
 }
 
 /**
@@ -118,7 +118,7 @@ static inline int test_and_change_bit(unsigned nr,
        unsigned long mask = BIT_MASK(nr);
        addr += BIT_WORD(nr);
        smp_mb();  /* barrier for proper semantics */
-       return (_atomic_xor(addr, mask) & mask) != 0;
+       return (_atomic_fetch_xor(addr, mask) & mask) != 0;
 }
 
 #include <asm-generic/bitops/ext2-atomic.h>
index 1a6ef1b69cb13cd4a8560c007a16a77cbbde36cf..e64a1b75fc386c4786d6a4988a4f5eb71bbfbd27 100644 (file)
                ret = gu.err;                                           \
        }
 
-#define __futex_set() __futex_call(__atomic_xchg)
-#define __futex_add() __futex_call(__atomic_xchg_add)
-#define __futex_or() __futex_call(__atomic_or)
-#define __futex_andn() __futex_call(__atomic_andn)
-#define __futex_xor() __futex_call(__atomic_xor)
+#define __futex_set() __futex_call(__atomic32_xchg)
+#define __futex_add() __futex_call(__atomic32_xchg_add)
+#define __futex_or() __futex_call(__atomic32_fetch_or)
+#define __futex_andn() __futex_call(__atomic32_fetch_andn)
+#define __futex_xor() __futex_call(__atomic32_fetch_xor)
 
 #define __futex_cmpxchg()                                              \
        {                                                               \
-               struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
-                                                       lock, oldval, oparg); \
+               struct __get_user gu = __atomic32_cmpxchg((u32 __force *)uaddr, \
+                                                         lock, oldval, oparg); \
                val = gu.val;                                           \
                ret = gu.err;                                           \
        }
index 298df1e9912a15b18c4de770a41f25ba14d20d73..f8128800dbf57661661270c60e30614a5c0f42c9 100644 (file)
@@ -61,13 +61,13 @@ static inline int *__atomic_setup(volatile void *v)
 
 int _atomic_xchg(int *v, int n)
 {
-       return __atomic_xchg(v, __atomic_setup(v), n).val;
+       return __atomic32_xchg(v, __atomic_setup(v), n).val;
 }
 EXPORT_SYMBOL(_atomic_xchg);
 
 int _atomic_xchg_add(int *v, int i)
 {
-       return __atomic_xchg_add(v, __atomic_setup(v), i).val;
+       return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
 }
 EXPORT_SYMBOL(_atomic_xchg_add);
 
@@ -78,39 +78,39 @@ int _atomic_xchg_add_unless(int *v, int a, int u)
         * to use the first argument consistently as the "old value"
         * in the assembly, as is done for _atomic_cmpxchg().
         */
-       return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
+       return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
 }
 EXPORT_SYMBOL(_atomic_xchg_add_unless);
 
 int _atomic_cmpxchg(int *v, int o, int n)
 {
-       return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
+       return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
 }
 EXPORT_SYMBOL(_atomic_cmpxchg);
 
-unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
 {
-       return __atomic_or((int *)p, __atomic_setup(p), mask).val;
+       return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
 }
-EXPORT_SYMBOL(_atomic_or);
+EXPORT_SYMBOL(_atomic_fetch_or);
 
-unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
 {
-       return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+       return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
 }
-EXPORT_SYMBOL(_atomic_and);
+EXPORT_SYMBOL(_atomic_fetch_and);
 
-unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
 {
-       return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
+       return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
 }
-EXPORT_SYMBOL(_atomic_andn);
+EXPORT_SYMBOL(_atomic_fetch_andn);
 
-unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
+unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
 {
-       return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
+       return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
 }
-EXPORT_SYMBOL(_atomic_xor);
+EXPORT_SYMBOL(_atomic_fetch_xor);
 
 
 long long _atomic64_xchg(long long *v, long long n)
@@ -142,23 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
-long long _atomic64_and(long long *v, long long n)
+long long _atomic64_fetch_and(long long *v, long long n)
 {
-       return __atomic64_and(v, __atomic_setup(v), n);
+       return __atomic64_fetch_and(v, __atomic_setup(v), n);
 }
-EXPORT_SYMBOL(_atomic64_and);
+EXPORT_SYMBOL(_atomic64_fetch_and);
 
-long long _atomic64_or(long long *v, long long n)
+long long _atomic64_fetch_or(long long *v, long long n)
 {
-       return __atomic64_or(v, __atomic_setup(v), n);
+       return __atomic64_fetch_or(v, __atomic_setup(v), n);
 }
-EXPORT_SYMBOL(_atomic64_or);
+EXPORT_SYMBOL(_atomic64_fetch_or);
 
-long long _atomic64_xor(long long *v, long long n)
+long long _atomic64_fetch_xor(long long *v, long long n)
 {
-       return __atomic64_xor(v, __atomic_setup(v), n);
+       return __atomic64_fetch_xor(v, __atomic_setup(v), n);
 }
-EXPORT_SYMBOL(_atomic64_xor);
+EXPORT_SYMBOL(_atomic64_fetch_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in
index f611265633d6958d1f21514a479f59ea53b20d0c..1a70e6c0f25936f62d8e95a77ec9e97ac1b66dc9 100644 (file)
@@ -172,15 +172,20 @@ STD_ENTRY_SECTION(__atomic\name, .text.atomic)
        .endif
        .endm
 
-atomic_op _cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
-atomic_op _xchg, 32, "move r24, r2"
-atomic_op _xchg_add, 32, "add r24, r22, r2"
-atomic_op _xchg_add_unless, 32, \
+
+/*
+ * Use __atomic32 prefix to avoid collisions with GCC builtin __atomic functions.
+ */
+
+atomic_op 32_cmpxchg, 32, "seq r26, r22, r2; { bbns r26, 3f; move r24, r3 }"
+atomic_op 32_xchg, 32, "move r24, r2"
+atomic_op 32_xchg_add, 32, "add r24, r22, r2"
+atomic_op 32_xchg_add_unless, 32, \
        "sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
-atomic_op _or, 32, "or r24, r22, r2"
-atomic_op _and, 32, "and r24, r22, r2"
-atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
-atomic_op _xor, 32, "xor r24, r22, r2"
+atomic_op 32_fetch_or, 32, "or r24, r22, r2"
+atomic_op 32_fetch_and, 32, "and r24, r22, r2"
+atomic_op 32_fetch_andn, 32, "nor r2, r2, zero; and r24, r22, r2"
+atomic_op 32_fetch_xor, 32, "xor r24, r22, r2"
 
 atomic_op 64_cmpxchg, 64, "{ seq r26, r22, r2; seq r27, r23, r3 }; \
        { bbns r26, 3f; move r24, r4 }; { bbns r27, 3f; move r25, r5 }"
@@ -192,9 +197,9 @@ atomic_op 64_xchg_add_unless, 64, \
        { bbns r26, 3f; add r24, r22, r4 }; \
        { bbns r27, 3f; add r25, r23, r5 }; \
        slt_u r26, r24, r22; add r25, r25, r26"
-atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
-atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
-atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
+atomic_op 64_fetch_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_fetch_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_fetch_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
 
        jrp     lr              /* happy backtracer */
 
index 88c2a53362e738110913134b840e1abe01df9fd9..076c6cc431136fc8475b0fa0b768bb7a6083dc66 100644 (file)
@@ -76,6 +76,12 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock)
        do {
                delay_backoff(iterations++);
        } while (READ_ONCE(lock->current_ticket) == curr);
+
+       /*
+        * The TILE architecture doesn't do read speculation; therefore
+        * a control dependency guarantees a LOAD->{LOAD,STORE} order.
+        */
+       barrier();
 }
 EXPORT_SYMBOL(arch_spin_unlock_wait);
 
index c8d1f94ff1fe00e13f30a6c0e3ae51563a3226fa..a4b5b2cbce9337bdc775a92c087b3c5cf3876965 100644 (file)
@@ -76,6 +76,12 @@ void arch_spin_unlock_wait(arch_spinlock_t *lock)
        do {
                delay_backoff(iterations++);
        } while (arch_spin_current(READ_ONCE(lock->lock)) == curr);
+
+       /*
+        * The TILE architecture doesn't do read speculation; therefore
+        * a control dependency guarantees a LOAD->{LOAD,STORE} order.
+        */
+       barrier();
 }
 EXPORT_SYMBOL(arch_spin_unlock_wait);
 
index d9a94da0c29fd72b048080681ab1660928a7d3f0..5977fea2c8b1d8cd542ddf831fb405b4de9ea517 100644 (file)
@@ -49,7 +49,6 @@ config X86
        select ARCH_WANTS_DYNAMIC_TASK_STRUCT
        select ARCH_WANT_FRAME_POINTERS
        select ARCH_WANT_IPC_PARSE_VERSION      if X86_32
-       select ARCH_WANT_OPTIONAL_GPIOLIB
        select BUILDTIME_EXTABLE_SORT
        select CLKEVT_I8253
        select CLKSRC_I8253                     if X86_32
@@ -294,11 +293,6 @@ config X86_32_LAZY_GS
        def_bool y
        depends on X86_32 && !CC_STACKPROTECTOR
 
-config ARCH_HWEIGHT_CFLAGS
-       string
-       default "-fcall-saved-ecx -fcall-saved-edx" if X86_32
-       default "-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" if X86_64
-
 config ARCH_SUPPORTS_UPROBES
        def_bool y
 
@@ -643,7 +637,7 @@ config STA2X11
        select X86_DMA_REMAP
        select SWIOTLB
        select MFD_STA2X11
-       select ARCH_REQUIRE_GPIOLIB
+       select GPIOLIB
        default n
        ---help---
          This adds support for boards based on the STA2X11 IO-Hub,
@@ -1934,21 +1928,26 @@ config RANDOMIZE_BASE
          attempts relying on knowledge of the location of kernel
          code internals.
 
-         The kernel physical and virtual address can be randomized
-         from 16MB up to 1GB on 64-bit and 512MB on 32-bit. (Note that
-         using RANDOMIZE_BASE reduces the memory space available to
-         kernel modules from 1.5GB to 1GB.)
+         On 64-bit, the kernel physical and virtual addresses are
+         randomized separately. The physical address will be anywhere
+         between 16MB and the top of physical memory (up to 64TB). The
+         virtual address will be randomized from 16MB up to 1GB (9 bits
+         of entropy). Note that this also reduces the memory space
+         available to kernel modules from 1.5GB to 1GB.
+
+         On 32-bit, the kernel physical and virtual addresses are
+         randomized together. They will be randomized from 16MB up to
+         512MB (8 bits of entropy).
 
          Entropy is generated using the RDRAND instruction if it is
          supported. If RDTSC is supported, its value is mixed into
          the entropy pool as well. If neither RDRAND nor RDTSC are
-         supported, then entropy is read from the i8254 timer.
-
-         Since the kernel is built using 2GB addressing, and
-         PHYSICAL_ALIGN must be at a minimum of 2MB, only 10 bits of
-         entropy is theoretically possible. Currently, with the
-         default value for PHYSICAL_ALIGN and due to page table
-         layouts, 64-bit uses 9 bits of entropy and 32-bit uses 8 bits.
+         supported, then entropy is read from the i8254 timer. The
+         usable entropy is limited by the kernel being built using
+         2GB addressing, and that PHYSICAL_ALIGN must be at a
+         minimum of 2MB. As a result, only 10 bits of entropy are
+         theoretically possible, but the implementations are further
+         limited due to memory layouts.
 
          If CONFIG_HIBERNATE is also enabled, KASLR is disabled at boot
          time. To enable it, boot with "kaslr" on the kernel command
@@ -1988,6 +1987,38 @@ config PHYSICAL_ALIGN
 
          Don't change this unless you know what you are doing.
 
+config RANDOMIZE_MEMORY
+       bool "Randomize the kernel memory sections"
+       depends on X86_64
+       depends on RANDOMIZE_BASE
+       default RANDOMIZE_BASE
+       ---help---
+          Randomizes the base virtual address of kernel memory sections
+          (physical memory mapping, vmalloc & vmemmap). This security feature
+          makes exploits relying on predictable memory locations less reliable.
+
+          The order of allocations remains unchanged. Entropy is generated in
+          the same way as RANDOMIZE_BASE. The current implementation in the
+          optimal configuration has on average 30,000 different possible
+          virtual addresses for each memory section.
+
+          If unsure, say N.
+
+config RANDOMIZE_MEMORY_PHYSICAL_PADDING
+       hex "Physical memory mapping padding" if EXPERT
+       depends on RANDOMIZE_MEMORY
+       default "0xa" if MEMORY_HOTPLUG
+       default "0x0"
+       range 0x1 0x40 if MEMORY_HOTPLUG
+       range 0x0 0x40
+       ---help---
+          Define the padding in terabytes added to the existing physical
+          memory size during kernel memory randomization. It is useful
+          for memory hotplug support but reduces the entropy available for
+          address randomization.
+
+          If unsure, leave at the default value.
+
 config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs"
        depends on SMP
index 878e4b9940d9212ce581c5bea0ac518ae6bbf85f..0d41d68131cc43bcfae0bf88da2561cce6b0129b 100644 (file)
 #define BOOT_BITOPS_H
 #define _LINUX_BITOPS_H                /* Inhibit inclusion of <linux/bitops.h> */
 
-static inline int constant_test_bit(int nr, const void *addr)
+#include <linux/types.h>
+
+static inline bool constant_test_bit(int nr, const void *addr)
 {
        const u32 *p = (const u32 *)addr;
        return ((1UL << (nr & 31)) & (p[nr >> 5])) != 0;
 }
-static inline int variable_test_bit(int nr, const void *addr)
+static inline bool variable_test_bit(int nr, const void *addr)
 {
-       u8 v;
+       bool v;
        const u32 *p = (const u32 *)addr;
 
        asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
index 9011a88353ded70ece09718a04349464ee8467ec..e5612f3e3b57ca7611f1e348c7f317f1416dd54a 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/types.h>
 #include <linux/edd.h>
 #include <asm/setup.h>
+#include <asm/asm.h>
 #include "bitops.h"
 #include "ctype.h"
 #include "cpuflags.h"
@@ -176,18 +177,18 @@ static inline void wrgs32(u32 v, addr_t addr)
 }
 
 /* Note: these only return true/false, not a signed return value! */
-static inline int memcmp_fs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_fs(const void *s1, addr_t s2, size_t len)
 {
-       u8 diff;
-       asm volatile("fs; repe; cmpsb; setnz %0"
-                    : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+       bool diff;
+       asm volatile("fs; repe; cmpsb" CC_SET(nz)
+                    : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
        return diff;
 }
-static inline int memcmp_gs(const void *s1, addr_t s2, size_t len)
+static inline bool memcmp_gs(const void *s1, addr_t s2, size_t len)
 {
-       u8 diff;
-       asm volatile("gs; repe; cmpsb; setnz %0"
-                    : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+       bool diff;
+       asm volatile("gs; repe; cmpsb" CC_SET(nz)
+                    : CC_OUT(nz) (diff), "+D" (s1), "+S" (s2), "+c" (len));
        return diff;
 }
 
@@ -294,6 +295,7 @@ static inline int cmdline_find_option_bool(const char *option)
 
 /* cpu.c, cpucheck.c */
 int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
+int check_knl_erratum(void);
 int validate_cpu(void);
 
 /* early_serial_console.c */
index f1356889204e5c74430bc78bf231c12901866730..536ccfcc01c673c4e3196a4fe57276cf7d7d53a8 100644 (file)
@@ -85,7 +85,25 @@ vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
        $(objtree)/drivers/firmware/efi/libstub/lib.a
 vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 
+# The compressed kernel is built with -fPIC/-fPIE so that a boot loader
+# can place it anywhere in memory and it will still run. However, since
+# it is executed as-is without any ELF relocation processing performed
+# (and has already had all relocation sections stripped from the binary),
+# none of the code can use data relocations (e.g. static assignments of
+# pointer values), since they will be meaningless at runtime. This check
+# will refuse to link the vmlinux if any of these relocations are found.
+quiet_cmd_check_data_rel = DATAREL $@
+define cmd_check_data_rel
+       for obj in $(filter %.o,$^); do \
+               readelf -S $$obj | grep -qF .rel.local && { \
+                       echo "error: $$obj has data relocations!" >&2; \
+                       exit 1; \
+               } || true; \
+       done
+endef
+
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
+       $(call if_changed,check_data_rel)
        $(call if_changed,ld)
 
 OBJCOPYFLAGS_vmlinux.bin :=  -R .comment -S
index 52fef606bc54258b7095aa6b4b16eda8fbf16244..ff574dad95ccaf2739d0edfe85c94acea1e15134 100644 (file)
@@ -757,7 +757,6 @@ struct boot_params *make_boot_params(struct efi_config *c)
        struct boot_params *boot_params;
        struct apm_bios_info *bi;
        struct setup_header *hdr;
-       struct efi_info *efi;
        efi_loaded_image_t *image;
        void *options, *handle;
        efi_guid_t proto = LOADED_IMAGE_PROTOCOL_GUID;
@@ -800,7 +799,6 @@ struct boot_params *make_boot_params(struct efi_config *c)
        memset(boot_params, 0x0, 0x4000);
 
        hdr = &boot_params->hdr;
-       efi = &boot_params->efi_info;
        bi = &boot_params->apm_bios_info;
 
        /* Copy the second sector to boot_params */
index cfeb0259ed81aeb9341d071cb98e3ff129d06426..a66854d99ee1a34951faa2d426c8579bf4849bbc 100644 (file)
 #include "misc.h"
 #include "error.h"
 
-#include <asm/msr.h>
-#include <asm/archrandom.h>
-#include <asm/e820.h>
-
 #include <generated/compile.h>
 #include <linux/module.h>
 #include <linux/uts.h>
 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
                LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
 
-#define I8254_PORT_CONTROL     0x43
-#define I8254_PORT_COUNTER0    0x40
-#define I8254_CMD_READBACK     0xC0
-#define I8254_SELECT_COUNTER0  0x02
-#define I8254_STATUS_NOTREADY  0x40
-static inline u16 i8254(void)
-{
-       u16 status, timer;
-
-       do {
-               outb(I8254_PORT_CONTROL,
-                    I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
-               status = inb(I8254_PORT_COUNTER0);
-               timer  = inb(I8254_PORT_COUNTER0);
-               timer |= inb(I8254_PORT_COUNTER0) << 8;
-       } while (status & I8254_STATUS_NOTREADY);
-
-       return timer;
-}
-
 static unsigned long rotate_xor(unsigned long hash, const void *area,
                                size_t size)
 {
@@ -62,7 +38,7 @@ static unsigned long rotate_xor(unsigned long hash, const void *area,
 }
 
 /* Attempt to create a simple but unpredictable starting entropy. */
-static unsigned long get_random_boot(void)
+static unsigned long get_boot_seed(void)
 {
        unsigned long hash = 0;
 
@@ -72,50 +48,8 @@ static unsigned long get_random_boot(void)
        return hash;
 }
 
-static unsigned long get_random_long(const char *purpose)
-{
-#ifdef CONFIG_X86_64
-       const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
-#else
-       const unsigned long mix_const = 0x3f39e593UL;
-#endif
-       unsigned long raw, random = get_random_boot();
-       bool use_i8254 = true;
-
-       debug_putstr(purpose);
-       debug_putstr(" KASLR using");
-
-       if (has_cpuflag(X86_FEATURE_RDRAND)) {
-               debug_putstr(" RDRAND");
-               if (rdrand_long(&raw)) {
-                       random ^= raw;
-                       use_i8254 = false;
-               }
-       }
-
-       if (has_cpuflag(X86_FEATURE_TSC)) {
-               debug_putstr(" RDTSC");
-               raw = rdtsc();
-
-               random ^= raw;
-               use_i8254 = false;
-       }
-
-       if (use_i8254) {
-               debug_putstr(" i8254");
-               random ^= i8254();
-       }
-
-       /* Circular multiply for better bit diffusion */
-       asm("mul %3"
-           : "=a" (random), "=d" (raw)
-           : "a" (random), "rm" (mix_const));
-       random += raw;
-
-       debug_putstr("...\n");
-
-       return random;
-}
+#define KASLR_COMPRESSED_BOOT
+#include "../../lib/kaslr.c"
 
 struct mem_vector {
        unsigned long start;
@@ -132,17 +66,6 @@ enum mem_avoid_index {
 
 static struct mem_vector mem_avoid[MEM_AVOID_MAX];
 
-static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
-{
-       /* Item at least partially before region. */
-       if (item->start < region->start)
-               return false;
-       /* Item at least partially after region. */
-       if (item->start + item->size > region->start + region->size)
-               return false;
-       return true;
-}
-
 static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
 {
        /* Item one is entirely before item two. */
@@ -296,6 +219,7 @@ static bool mem_avoid_overlap(struct mem_vector *img,
                if (mem_overlaps(img, &mem_avoid[i]) &&
                    mem_avoid[i].start < earliest) {
                        *overlap = mem_avoid[i];
+                       earliest = overlap->start;
                        is_overlapping = true;
                }
        }
@@ -310,6 +234,7 @@ static bool mem_avoid_overlap(struct mem_vector *img,
 
                if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
                        *overlap = avoid;
+                       earliest = overlap->start;
                        is_overlapping = true;
                }
 
@@ -319,8 +244,6 @@ static bool mem_avoid_overlap(struct mem_vector *img,
        return is_overlapping;
 }
 
-static unsigned long slots[KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN];
-
 struct slot_area {
        unsigned long addr;
        int num;
@@ -351,36 +274,44 @@ static void store_slot_info(struct mem_vector *region, unsigned long image_size)
        }
 }
 
-static void slots_append(unsigned long addr)
-{
-       /* Overflowing the slots list should be impossible. */
-       if (slot_max >= KERNEL_IMAGE_SIZE / CONFIG_PHYSICAL_ALIGN)
-               return;
-
-       slots[slot_max++] = addr;
-}
-
 static unsigned long slots_fetch_random(void)
 {
+       unsigned long slot;
+       int i;
+
        /* Handle case of no slots stored. */
        if (slot_max == 0)
                return 0;
 
-       return slots[get_random_long("Physical") % slot_max];
+       slot = kaslr_get_random_long("Physical") % slot_max;
+
+       for (i = 0; i < slot_area_index; i++) {
+               if (slot >= slot_areas[i].num) {
+                       slot -= slot_areas[i].num;
+                       continue;
+               }
+               return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
+       }
+
+       if (i == slot_area_index)
+               debug_putstr("slots_fetch_random() failed!?\n");
+       return 0;
 }
 
 static void process_e820_entry(struct e820entry *entry,
                               unsigned long minimum,
                               unsigned long image_size)
 {
-       struct mem_vector region, img, overlap;
+       struct mem_vector region, overlap;
+       struct slot_area slot_area;
+       unsigned long start_orig;
 
        /* Skip non-RAM entries. */
        if (entry->type != E820_RAM)
                return;
 
-       /* Ignore entries entirely above our maximum. */
-       if (entry->addr >= KERNEL_IMAGE_SIZE)
+       /* On 32-bit, ignore entries entirely above our maximum. */
+       if (IS_ENABLED(CONFIG_X86_32) && entry->addr >= KERNEL_IMAGE_SIZE)
                return;
 
        /* Ignore entries entirely below our minimum. */
@@ -390,31 +321,55 @@ static void process_e820_entry(struct e820entry *entry,
        region.start = entry->addr;
        region.size = entry->size;
 
-       /* Potentially raise address to minimum location. */
-       if (region.start < minimum)
-               region.start = minimum;
+       /* Give up if slot area array is full. */
+       while (slot_area_index < MAX_SLOT_AREA) {
+               start_orig = region.start;
 
-       /* Potentially raise address to meet alignment requirements. */
-       region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
+               /* Potentially raise address to minimum location. */
+               if (region.start < minimum)
+                       region.start = minimum;
 
-       /* Did we raise the address above the bounds of this e820 region? */
-       if (region.start > entry->addr + entry->size)
-               return;
+               /* Potentially raise address to meet alignment needs. */
+               region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
 
-       /* Reduce size by any delta from the original address. */
-       region.size -= region.start - entry->addr;
+               /* Did we raise the address above this e820 region? */
+               if (region.start > entry->addr + entry->size)
+                       return;
 
-       /* Reduce maximum size to fit end of image within maximum limit. */
-       if (region.start + region.size > KERNEL_IMAGE_SIZE)
-               region.size = KERNEL_IMAGE_SIZE - region.start;
+               /* Reduce size by any delta from the original address. */
+               region.size -= region.start - start_orig;
 
-       /* Walk each aligned slot and check for avoided areas. */
-       for (img.start = region.start, img.size = image_size ;
-            mem_contains(&region, &img) ;
-            img.start += CONFIG_PHYSICAL_ALIGN) {
-               if (mem_avoid_overlap(&img, &overlap))
-                       continue;
-               slots_append(img.start);
+               /* On 32-bit, reduce region size to fit within max size. */
+               if (IS_ENABLED(CONFIG_X86_32) &&
+                   region.start + region.size > KERNEL_IMAGE_SIZE)
+                       region.size = KERNEL_IMAGE_SIZE - region.start;
+
+               /* Return if region can't contain decompressed kernel */
+               if (region.size < image_size)
+                       return;
+
+               /* If nothing overlaps, store the region and return. */
+               if (!mem_avoid_overlap(&region, &overlap)) {
+                       store_slot_info(&region, image_size);
+                       return;
+               }
+
+               /* Store beginning of region if holds at least image_size. */
+               if (overlap.start > region.start + image_size) {
+                       struct mem_vector beginning;
+
+                       beginning.start = region.start;
+                       beginning.size = overlap.start - region.start;
+                       store_slot_info(&beginning, image_size);
+               }
+
+               /* Return if overlap extends to or past end of region. */
+               if (overlap.start + overlap.size >= region.start + region.size)
+                       return;
+
+               /* Clip off the overlapping region and start over. */
+               region.size -= overlap.start - region.start + overlap.size;
+               region.start = overlap.start + overlap.size;
        }
 }
 
@@ -431,6 +386,10 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
        for (i = 0; i < boot_params->e820_entries; i++) {
                process_e820_entry(&boot_params->e820_map[i], minimum,
                                   image_size);
+               if (slot_area_index == MAX_SLOT_AREA) {
+                       debug_putstr("Aborted e820 scan (slot_areas full)!\n");
+                       break;
+               }
        }
 
        return slots_fetch_random();
@@ -454,7 +413,7 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
        slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
                 CONFIG_PHYSICAL_ALIGN + 1;
 
-       random_addr = get_random_long("Virtual") % slots;
+       random_addr = kaslr_get_random_long("Virtual") % slots;
 
        return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
 }
@@ -463,48 +422,54 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
  * Since this function examines addresses much more numerically,
  * it takes the input and output pointers as 'unsigned long'.
  */
-unsigned char *choose_random_location(unsigned long input,
-                                     unsigned long input_size,
-                                     unsigned long output,
-                                     unsigned long output_size)
+void choose_random_location(unsigned long input,
+                           unsigned long input_size,
+                           unsigned long *output,
+                           unsigned long output_size,
+                           unsigned long *virt_addr)
 {
-       unsigned long choice = output;
-       unsigned long random_addr;
+       unsigned long random_addr, min_addr;
+
+       /* By default, keep output position unchanged. */
+       *virt_addr = *output;
 
-#ifdef CONFIG_HIBERNATION
-       if (!cmdline_find_option_bool("kaslr")) {
-               warn("KASLR disabled: 'kaslr' not on cmdline (hibernation selected).");
-               goto out;
-       }
-#else
        if (cmdline_find_option_bool("nokaslr")) {
                warn("KASLR disabled: 'nokaslr' on cmdline.");
-               goto out;
+               return;
        }
-#endif
 
        boot_params->hdr.loadflags |= KASLR_FLAG;
 
+       /* Prepare to add new identity pagetables on demand. */
+       initialize_identity_maps();
+
        /* Record the various known unsafe memory ranges. */
-       mem_avoid_init(input, input_size, output);
+       mem_avoid_init(input, input_size, *output);
+
+       /*
+        * Low end of the randomization range should be the
+        * smaller of 512M or the initial kernel image
+        * location:
+        */
+       min_addr = min(*output, 512UL << 20);
 
        /* Walk e820 and find a random address. */
-       random_addr = find_random_phys_addr(output, output_size);
+       random_addr = find_random_phys_addr(min_addr, output_size);
        if (!random_addr) {
                warn("KASLR disabled: could not find suitable E820 region!");
-               goto out;
+       } else {
+               /* Update the new physical address location. */
+               if (*output != random_addr) {
+                       add_identity_map(random_addr, output_size);
+                       *output = random_addr;
+               }
        }
 
-       /* Always enforce the minimum. */
-       if (random_addr < choice)
-               goto out;
-
-       choice = random_addr;
-
-       add_identity_map(choice, output_size);
-
        /* This actually loads the identity pagetable on x86_64. */
        finalize_identity_maps();
-out:
-       return (unsigned char *)choice;
+
+       /* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
+       if (IS_ENABLED(CONFIG_X86_64))
+               random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
+       *virt_addr = random_addr;
 }
index f14db4e21654401940aab6a06c75192e05af721d..b3c5a5f030ced9e6610aeb84889561a3a0f7500e 100644 (file)
@@ -170,7 +170,8 @@ void __puthex(unsigned long value)
 }
 
 #if CONFIG_X86_NEED_RELOCS
-static void handle_relocations(void *output, unsigned long output_len)
+static void handle_relocations(void *output, unsigned long output_len,
+                              unsigned long virt_addr)
 {
        int *reloc;
        unsigned long delta, map, ptr;
@@ -182,11 +183,6 @@ static void handle_relocations(void *output, unsigned long output_len)
         * and where it was actually loaded.
         */
        delta = min_addr - LOAD_PHYSICAL_ADDR;
-       if (!delta) {
-               debug_putstr("No relocation needed... ");
-               return;
-       }
-       debug_putstr("Performing relocations... ");
 
        /*
         * The kernel contains a table of relocation addresses. Those
@@ -197,6 +193,20 @@ static void handle_relocations(void *output, unsigned long output_len)
         */
        map = delta - __START_KERNEL_map;
 
+       /*
+        * 32-bit always performs relocations. 64-bit relocations are only
+        * needed if KASLR has chosen a different starting address offset
+        * from __START_KERNEL_map.
+        */
+       if (IS_ENABLED(CONFIG_X86_64))
+               delta = virt_addr - LOAD_PHYSICAL_ADDR;
+
+       if (!delta) {
+               debug_putstr("No relocation needed... ");
+               return;
+       }
+       debug_putstr("Performing relocations... ");
+
        /*
         * Process relocations: 32 bit relocations first then 64 bit after.
         * Three sets of binary relocations are added to the end of the kernel
@@ -250,7 +260,8 @@ static void handle_relocations(void *output, unsigned long output_len)
 #endif
 }
 #else
-static inline void handle_relocations(void *output, unsigned long output_len)
+static inline void handle_relocations(void *output, unsigned long output_len,
+                                     unsigned long virt_addr)
 { }
 #endif
 
@@ -327,7 +338,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
                                  unsigned long output_len)
 {
        const unsigned long kernel_total_size = VO__end - VO__text;
-       unsigned char *output_orig = output;
+       unsigned long virt_addr = (unsigned long)output;
 
        /* Retain x86 boot parameters pointer passed from startup_32/64. */
        boot_params = rmode;
@@ -366,13 +377,16 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
         * the entire decompressed kernel plus relocation table, or the
         * entire decompressed kernel plus .bss and .brk sections.
         */
-       output = choose_random_location((unsigned long)input_data, input_len,
-                                       (unsigned long)output,
-                                       max(output_len, kernel_total_size));
+       choose_random_location((unsigned long)input_data, input_len,
+                               (unsigned long *)&output,
+                               max(output_len, kernel_total_size),
+                               &virt_addr);
 
        /* Validate memory location choices. */
        if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
-               error("Destination address inappropriately aligned");
+               error("Destination physical address inappropriately aligned");
+       if (virt_addr & (MIN_KERNEL_ALIGN - 1))
+               error("Destination virtual address inappropriately aligned");
 #ifdef CONFIG_X86_64
        if (heap > 0x3fffffffffffUL)
                error("Destination address too large");
@@ -382,19 +396,16 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 #endif
 #ifndef CONFIG_RELOCATABLE
        if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
-               error("Wrong destination address");
+               error("Destination address does not match LOAD_PHYSICAL_ADDR");
+       if ((unsigned long)output != virt_addr)
+               error("Destination virtual address changed when not relocatable");
 #endif
 
        debug_putstr("\nDecompressing Linux... ");
        __decompress(input_data, input_len, NULL, NULL, output, output_len,
                        NULL, error);
        parse_elf(output);
-       /*
-        * 32-bit always performs relocations. 64-bit relocations are only
-        * needed if kASLR has chosen a different load address.
-        */
-       if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
-               handle_relocations(output, output_len);
+       handle_relocations(output, output_len, virt_addr);
        debug_putstr("done.\nBooting the kernel.\n");
        return output;
 }
index b6fec1ff10e442bac58885af6bc54223f695480c..1c8355eadbd199027fd12da0347f5f3f5f2dcf43 100644 (file)
@@ -67,28 +67,33 @@ int cmdline_find_option_bool(const char *option);
 
 #if CONFIG_RANDOMIZE_BASE
 /* kaslr.c */
-unsigned char *choose_random_location(unsigned long input_ptr,
-                                     unsigned long input_size,
-                                     unsigned long output_ptr,
-                                     unsigned long output_size);
+void choose_random_location(unsigned long input,
+                           unsigned long input_size,
+                           unsigned long *output,
+                           unsigned long output_size,
+                           unsigned long *virt_addr);
 /* cpuflags.c */
 bool has_cpuflag(int flag);
 #else
-static inline
-unsigned char *choose_random_location(unsigned long input_ptr,
-                                     unsigned long input_size,
-                                     unsigned long output_ptr,
-                                     unsigned long output_size)
+static inline void choose_random_location(unsigned long input,
+                                         unsigned long input_size,
+                                         unsigned long *output,
+                                         unsigned long output_size,
+                                         unsigned long *virt_addr)
 {
-       return (unsigned char *)output_ptr;
+       /* No change from existing output location. */
+       *virt_addr = *output;
 }
 #endif
 
 #ifdef CONFIG_X86_64
+void initialize_identity_maps(void);
 void add_identity_map(unsigned long start, unsigned long size);
 void finalize_identity_maps(void);
 extern unsigned char _pgtable[];
 #else
+static inline void initialize_identity_maps(void)
+{ }
 static inline void add_identity_map(unsigned long start, unsigned long size)
 { }
 static inline void finalize_identity_maps(void)
index 34b95df14e694d419432cb4543f2a7bfa082b0e6..56589d0a804b1239c7e8f5dce5ea1fd237a8ae8a 100644 (file)
@@ -2,6 +2,9 @@
  * This code is used on x86_64 to create page table identity mappings on
  * demand by building up a new set of page tables (or appending to the
  * existing ones), and then switching over to them when ready.
+ *
+ * Copyright (C) 2015-2016  Yinghai Lu
+ * Copyright (C)      2016  Kees Cook
  */
 
 /*
@@ -17,6 +20,9 @@
 /* These actually do the work of building the kernel identity maps. */
 #include <asm/init.h>
 #include <asm/pgtable.h>
+/* Use the static base for this part of the boot process */
+#undef __PAGE_OFFSET
+#define __PAGE_OFFSET __PAGE_OFFSET_BASE
 #include "../../mm/ident_map.c"
 
 /* Used by pgtable.h asm code to force instruction serialization. */
@@ -59,9 +65,21 @@ static struct alloc_pgt_data pgt_data;
 /* The top level page table entry pointer. */
 static unsigned long level4p;
 
+/*
+ * Mapping information structure passed to kernel_ident_mapping_init().
+ * Due to relocation, pointers must be assigned at run time not build time.
+ */
+static struct x86_mapping_info mapping_info = {
+       .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
+};
+
 /* Locates and clears a region for a new top level page table. */
-static void prepare_level4(void)
+void initialize_identity_maps(void)
 {
+       /* Init mapping_info with run-time function/buffer pointers. */
+       mapping_info.alloc_pgt_page = alloc_pgt_page;
+       mapping_info.context = &pgt_data;
+
        /*
         * It should be impossible for this not to already be true,
         * but since calling this a second time would rewind the other
@@ -96,17 +114,8 @@ static void prepare_level4(void)
  */
 void add_identity_map(unsigned long start, unsigned long size)
 {
-       struct x86_mapping_info mapping_info = {
-               .alloc_pgt_page = alloc_pgt_page,
-               .context        = &pgt_data,
-               .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
-       };
        unsigned long end = start + size;
 
-       /* Make sure we have a top level page table ready to use. */
-       if (!level4p)
-               prepare_level4();
-
        /* Align boundary to 2M. */
        start = round_down(start, PMD_SIZE);
        end = round_up(end, PMD_SIZE);
index 29207f69ae8c760837984307b51afcf5377c1b49..26240dde081e82e696b9a828a191b01c7bdaf0a9 100644 (file)
@@ -93,6 +93,8 @@ int validate_cpu(void)
                show_cap_strs(err_flags);
                putchar('\n');
                return -1;
+       } else if (check_knl_erratum()) {
+               return -1;
        } else {
                return 0;
        }
index 1fd7d575092e9e51834b7b4750c48f5b567e1bce..4ad7d70e8739f638f7ff3ce6795c015a7048091c 100644 (file)
@@ -24,6 +24,7 @@
 # include "boot.h"
 #endif
 #include <linux/types.h>
+#include <asm/intel-family.h>
 #include <asm/processor-flags.h>
 #include <asm/required-features.h>
 #include <asm/msr-index.h>
@@ -175,6 +176,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
                        puts("WARNING: PAE disabled. Use parameter 'forcepae' to enable at your own risk!\n");
                }
        }
+       if (!err)
+               err = check_knl_erratum();
 
        if (err_flags_ptr)
                *err_flags_ptr = err ? err_flags : NULL;
@@ -185,3 +188,33 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
 
        return (cpu.level < req_level || err) ? -1 : 0;
 }
+
+int check_knl_erratum(void)
+{
+       /*
+        * First check for the affected model/family:
+        */
+       if (!is_intel() ||
+           cpu.family != 6 ||
+           cpu.model != INTEL_FAM6_XEON_PHI_KNL)
+               return 0;
+
+       /*
+        * This erratum affects the Accessed/Dirty bits, and can
+        * cause stray bits to be set in !Present PTEs.  We have
+        * enough bits in our 64-bit PTEs (which we have on real
+        * 64-bit mode or PAE) to avoid using these troublesome
+        * bits.  But, we do not have enough space in our 32-bit
+        * PTEs.  So, refuse to run on 32-bit non-PAE kernels.
+        */
+       if (IS_ENABLED(CONFIG_X86_64) || IS_ENABLED(CONFIG_X86_PAE))
+               return 0;
+
+       puts("This 32-bit kernel can not run on this Xeon Phi x200\n"
+            "processor due to a processor erratum.  Use a 64-bit\n"
+            "kernel, or enable PAE in this 32-bit kernel.\n\n");
+
+       return -1;
+}
+
+
index 431fa5f84537d1f8019ac2f23c4c01e0573ab955..6687ab9532575bf1d52fac34809fb94a040613f7 100644 (file)
@@ -102,6 +102,7 @@ void get_cpuflags(void)
                        cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
                              &cpu.flags[0]);
                        cpu.level = (tfms >> 8) & 15;
+                       cpu.family = cpu.level;
                        cpu.model = (tfms >> 4) & 15;
                        if (cpu.level >= 6)
                                cpu.model += ((tfms >> 16) & 0xf) << 4;
index 4cb404fd45ceaa0e89f9e669d6a59de4ce378449..15ad56a3f9052174a65b60076f408985a0eec0d8 100644 (file)
@@ -6,6 +6,7 @@
 
 struct cpu_features {
        int level;              /* Family, or 64 for x86-64 */
+       int family;             /* Family, always */
        int model;
        u32 flags[NCAPINTS];
 };
index 318b8465d30204cad7006bf0889672e0da093aed..cc3bd583dce1abc5fafe6b92417b8a713cd4cdfd 100644 (file)
@@ -17,7 +17,7 @@
 
 int memcmp(const void *s1, const void *s2, size_t len)
 {
-       u8 diff;
+       bool diff;
        asm("repe; cmpsb; setnz %0"
            : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
        return diff;
index ec138e538c44f9acf7f829827d14a3991473763f..9e1e27d31c6d6d9425f5fe4b0d916c26bd5d0abd 100644 (file)
@@ -40,10 +40,10 @@ static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
 
 #ifdef CONFIG_CONTEXT_TRACKING
 /* Called on entry from user mode with IRQs off. */
-__visible void enter_from_user_mode(void)
+__visible inline void enter_from_user_mode(void)
 {
        CT_WARN_ON(ct_state() != CONTEXT_USER);
-       user_exit();
+       user_exit_irqoff();
 }
 #else
 static inline void enter_from_user_mode(void) {}
@@ -274,7 +274,7 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
        ti->status &= ~TS_COMPAT;
 #endif
 
-       user_enter();
+       user_enter_irqoff();
 }
 
 #define SYSCALL_EXIT_WORK_FLAGS                                \
index 983e5d3a0d271c387e24371ddc3fcd6e7110a27d..0b56666e6039b9d090cc01c014ccba2b2662be88 100644 (file)
@@ -1153,3 +1153,14 @@ ENTRY(async_page_fault)
        jmp     error_code
 END(async_page_fault)
 #endif
+
+ENTRY(rewind_stack_do_exit)
+       /* Prevent any naive code from trying to unwind to our caller. */
+       xorl    %ebp, %ebp
+
+       movl    PER_CPU_VAR(cpu_current_top_of_stack), %esi
+       leal    -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
+
+       call    do_exit
+1:     jmp 1b
+END(rewind_stack_do_exit)
index 9ee0da1807edff462536e3628e1db217bb835cd8..b846875aeea6792ce4a0c359f7635112f27e84d5 100644 (file)
@@ -1423,3 +1423,14 @@ ENTRY(ignore_sysret)
        mov     $-ENOSYS, %eax
        sysret
 END(ignore_sysret)
+
+ENTRY(rewind_stack_do_exit)
+       /* Prevent any naive code from trying to unwind to our caller. */
+       xorl    %ebp, %ebp
+
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rax
+       leaq    -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
+
+       call    do_exit
+1:     jmp 1b
+END(rewind_stack_do_exit)
index 555263e385c9210af5f70e08dd27871005c5a865..e9ce9c7c39b48ca5e439735c87d6fe5b138d7fbc 100644 (file)
 543    x32     io_setup                compat_sys_io_setup
 544    x32     io_submit               compat_sys_io_submit
 545    x32     execveat                compat_sys_execveat/ptregs
-534    x32     preadv2                 compat_sys_preadv2
-535    x32     pwritev2                compat_sys_pwritev2
+546    x32     preadv2                 compat_sys_preadv64v2
+547    x32     pwritev2                compat_sys_pwritev64v2
index 027aec4a74df6a99c2e5733e2f68ff5fb5fbc19a..627ecbcb2e6267796b634e05b16873285300a977 100644 (file)
@@ -33,7 +33,7 @@
        .endif
 
        call \func
-       jmp  restore
+       jmp  .L_restore
        _ASM_NOKPROBE(\name)
        .endm
 
@@ -54,7 +54,7 @@
 #if defined(CONFIG_TRACE_IRQFLAGS) \
  || defined(CONFIG_DEBUG_LOCK_ALLOC) \
  || defined(CONFIG_PREEMPT)
-restore:
+.L_restore:
        popq %r11
        popq %r10
        popq %r9
@@ -66,5 +66,5 @@ restore:
        popq %rdi
        popq %rbp
        ret
-       _ASM_NOKPROBE(restore)
+       _ASM_NOKPROBE(.L_restore)
 #endif
index 253b72eaade6b538905acb4c7a8329b04480655a..68b63fddc209f57248cf1b01927f85a57da9febb 100644 (file)
@@ -134,7 +134,7 @@ VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
 override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
 
 targets += vdso32/vdso32.lds
-targets += vdso32/note.o vdso32/vclock_gettime.o vdso32/system_call.o
+targets += vdso32/note.o vdso32/system_call.o vdso32/sigreturn.o
 targets += vdso32/vclock_gettime.o
 
 KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
@@ -156,7 +156,8 @@ $(obj)/vdso32.so.dbg: FORCE \
                      $(obj)/vdso32/vdso32.lds \
                      $(obj)/vdso32/vclock_gettime.o \
                      $(obj)/vdso32/note.o \
-                     $(obj)/vdso32/system_call.o
+                     $(obj)/vdso32/system_call.o \
+                     $(obj)/vdso32/sigreturn.o
        $(call if_changed,vdso)
 
 #
index d7ec4e251c0a2e53572438891508dfeb4a3a016a..20633e026e82cc4b9a28d6698394cab37ec096a8 100644 (file)
@@ -1,11 +1,3 @@
-/*
- * Common code for the sigreturn entry points in vDSO images.
- * So far this code is the same for both int80 and sysenter versions.
- * This file is #include'd by int80.S et al to define them first thing.
- * The kernel assumes that the addresses of these routines are constant
- * for all vDSO implementations.
- */
-
 #include <linux/linkage.h>
 #include <asm/unistd_32.h>
 #include <asm/asm-offsets.h>
index 0109ac6cb79cc73a5d74c99bc2c97705a9ca5c7d..ed4bc9731cbbc72c64aa6431100f6c80816f62a5 100644 (file)
@@ -2,16 +2,11 @@
  * AT_SYSINFO entry point
 */
 
+#include <linux/linkage.h>
 #include <asm/dwarf2.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
-/*
- * First get the common code for the sigreturn entry points.
- * This must come first.
- */
-#include "sigreturn.S"
-
        .text
        .globl __kernel_vsyscall
        .type __kernel_vsyscall,@function
index ab220ac9b3b98f8eaeae4e9672d3e92a473b03f1..3329844e3c43913d278af48b4a93f5d1e1ce2ccc 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/random.h>
 #include <linux/elf.h>
 #include <linux/cpu.h>
+#include <linux/ptrace.h>
 #include <asm/pvclock.h>
 #include <asm/vgtod.h>
 #include <asm/proto.h>
@@ -97,10 +98,40 @@ static int vdso_fault(const struct vm_special_mapping *sm,
        return 0;
 }
 
-static const struct vm_special_mapping text_mapping = {
-       .name = "[vdso]",
-       .fault = vdso_fault,
-};
+static void vdso_fix_landing(const struct vdso_image *image,
+               struct vm_area_struct *new_vma)
+{
+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
+       if (in_ia32_syscall() && image == &vdso_image_32) {
+               struct pt_regs *regs = current_pt_regs();
+               unsigned long vdso_land = image->sym_int80_landing_pad;
+               unsigned long old_land_addr = vdso_land +
+                       (unsigned long)current->mm->context.vdso;
+
+               /* Fixing userspace landing - look at do_fast_syscall_32 */
+               if (regs->ip == old_land_addr)
+                       regs->ip = new_vma->vm_start + vdso_land;
+       }
+#endif
+}
+
+static int vdso_mremap(const struct vm_special_mapping *sm,
+               struct vm_area_struct *new_vma)
+{
+       unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
+       const struct vdso_image *image = current->mm->context.vdso_image;
+
+       if (image->size != new_size)
+               return -EINVAL;
+
+       if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
+               return -EFAULT;
+
+       vdso_fix_landing(image, new_vma);
+       current->mm->context.vdso = (void __user *)new_vma->vm_start;
+
+       return 0;
+}
 
 static int vvar_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -151,6 +182,12 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
        struct vm_area_struct *vma;
        unsigned long addr, text_start;
        int ret = 0;
+
+       static const struct vm_special_mapping vdso_mapping = {
+               .name = "[vdso]",
+               .fault = vdso_fault,
+               .mremap = vdso_mremap,
+       };
        static const struct vm_special_mapping vvar_mapping = {
                .name = "[vvar]",
                .fault = vvar_fault,
@@ -185,7 +222,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                                      &text_mapping);
+                                      &vdso_mapping);
 
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
index 174c2549939d6d184b3c0e01e9d08e23356dacbe..75fc719b7f31a57d27e120a9a23cd2dbe8340beb 100644 (file)
@@ -96,7 +96,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
 {
        /*
         * XXX: if access_ok, get_user, and put_user handled
-        * sig_on_uaccess_error, this could go away.
+        * sig_on_uaccess_err, this could go away.
         */
 
        if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
@@ -125,7 +125,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
        struct task_struct *tsk;
        unsigned long caller;
        int vsyscall_nr, syscall_nr, tmp;
-       int prev_sig_on_uaccess_error;
+       int prev_sig_on_uaccess_err;
        long ret;
 
        /*
@@ -221,8 +221,8 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
         * With a real vsyscall, page faults cause SIGSEGV.  We want to
         * preserve that behavior to make writing exploits harder.
         */
-       prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
-       current_thread_info()->sig_on_uaccess_error = 1;
+       prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
+       current->thread.sig_on_uaccess_err = 1;
 
        ret = -EFAULT;
        switch (vsyscall_nr) {
@@ -243,7 +243,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
                break;
        }
 
-       current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
+       current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
 
 check_fault:
        if (ret == -EFAULT) {
index 33787ee817f0cdaad78814849b0aad2dc7e2b407..dfebbde2a4cc252a7bc8ab2cd5f84c679df9336c 100644 (file)
@@ -263,7 +263,7 @@ static bool check_hw_exists(void)
 
 msr_fail:
        pr_cont("Broken PMU hardware detected, using software events only.\n");
-       pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+       printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
                boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
                reg, val_new);
 
@@ -1622,6 +1622,29 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, cha
 }
 EXPORT_SYMBOL_GPL(events_sysfs_show);
 
+ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
+                         char *page)
+{
+       struct perf_pmu_events_ht_attr *pmu_attr =
+               container_of(attr, struct perf_pmu_events_ht_attr, attr);
+
+       /*
+        * Report conditional events depending on Hyper-Threading.
+        *
+        * This is overly conservative as usually the HT special
+        * handling is not needed if the other CPU thread is idle.
+        *
+        * Note this does not (and cannot) handle the case when thread
+        * siblings are invisible, for example with virtualization
+        * if they are owned by some other guest.  The user tool
+        * has to re-read when a thread sibling gets onlined later.
+        */
+       return sprintf(page, "%s",
+                       topology_max_smt_threads() > 1 ?
+                       pmu_attr->event_str_ht :
+                       pmu_attr->event_str_noht);
+}
+
 EVENT_ATTR(cpu-cycles,                 CPU_CYCLES              );
 EVENT_ATTR(instructions,               INSTRUCTIONS            );
 EVENT_ATTR(cache-references,           CACHE_REFERENCES        );
@@ -2319,7 +2342,7 @@ void
 perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 {
        struct stack_frame frame;
-       const void __user *fp;
+       const unsigned long __user *fp;
 
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
@@ -2332,7 +2355,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
        if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
                return;
 
-       fp = (void __user *)regs->bp;
+       fp = (unsigned long __user *)regs->bp;
 
        perf_callchain_store(entry, regs->ip);
 
@@ -2345,16 +2368,17 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
        pagefault_disable();
        while (entry->nr < entry->max_stack) {
                unsigned long bytes;
+
                frame.next_frame             = NULL;
                frame.return_address = 0;
 
-               if (!access_ok(VERIFY_READ, fp, 16))
+               if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
                        break;
 
-               bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
+               bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
                if (bytes != 0)
                        break;
-               bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
+               bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
                if (bytes != 0)
                        break;
 
index 3660b2cf245ad7d22eaf0f9d1cf2602d270b62c2..06c2baa518144336133f775299b086d0ac4306ec 100644 (file)
@@ -1,8 +1,8 @@
 obj-$(CONFIG_CPU_SUP_INTEL)            += core.o bts.o cqm.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += ds.o knc.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += lbr.o p4.o p6.o pt.o
-obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)   += intel-rapl.o
-intel-rapl-objs                                := rapl.o
+obj-$(CONFIG_PERF_EVENTS_INTEL_RAPL)   += intel-rapl-perf.o
+intel-rapl-perf-objs                   := rapl.o
 obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += intel-uncore.o
 intel-uncore-objs                      := uncore.o uncore_nhmex.o uncore_snb.o uncore_snbep.o
 obj-$(CONFIG_PERF_EVENTS_INTEL_CSTATE) += intel-cstate.o
index 7c666958a6250354aa204d24e73f94670264ffee..0974ba11e9544a169a5071806f409aed9682291b 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/hardirq.h>
+#include <asm/intel-family.h>
 #include <asm/apic.h>
 
 #include "../perf_event.h"
@@ -115,6 +116,10 @@ static struct event_constraint intel_snb_event_constraints[] __read_mostly =
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 
+       /*
+        * When HT is off these events can only run on the bottom 4 counters
+        * When HT is on, they are impacted by the HT bug and require EXCL access
+        */
        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -139,6 +144,10 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 
+       /*
+        * When HT is off these events can only run on the bottom 4 counters
+        * When HT is on, they are impacted by the HT bug and require EXCL access
+        */
        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -177,19 +186,27 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
        EVENT_CONSTRAINT_END
 };
 
-struct event_constraint intel_skl_event_constraints[] = {
+static struct event_constraint intel_skl_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),    /* INST_RETIRED.PREC_DIST */
+
+       /*
+        * when HT is off, these can only run on the bottom 4 counters
+        */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xc6, 0xf),      /* FRONTEND_RETIRED.* */
+
        EVENT_CONSTRAINT_END
 };
 
 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
-       INTEL_UEVENT_EXTRA_REG(0x01b7,
-                              MSR_OFFCORE_RSP_0, 0x7f9ffbffffull, RSP_0),
-       INTEL_UEVENT_EXTRA_REG(0x02b7,
-                              MSR_OFFCORE_RSP_1, 0x3f9ffbffffull, RSP_1),
+       INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
+       INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
        EVENT_EXTRA_END
 };
 
@@ -225,14 +242,51 @@ EVENT_ATTR_STR(mem-loads, mem_ld_nhm,     "event=0x0b,umask=0x10,ldlat=3");
 EVENT_ATTR_STR(mem-loads,      mem_ld_snb,     "event=0xcd,umask=0x1,ldlat=3");
 EVENT_ATTR_STR(mem-stores,     mem_st_snb,     "event=0xcd,umask=0x2");
 
-struct attribute *nhm_events_attrs[] = {
+static struct attribute *nhm_events_attrs[] = {
        EVENT_PTR(mem_ld_nhm),
        NULL,
 };
 
-struct attribute *snb_events_attrs[] = {
+/*
+ * topdown events for Intel Core CPUs.
+ *
+ * The events are all in slots, which is a free slot in a 4 wide
+ * pipeline. Some events are already reported in slots, for cycle
+ * events we multiply by the pipeline width (4).
+ *
+ * With Hyper Threading on, topdown metrics are either summed or averaged
+ * between the threads of a core: (count_t0 + count_t1).
+ *
+ * For the average case the metric is always scaled to pipeline width,
+ * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
+ */
+
+EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
+       "event=0x3c,umask=0x0",                 /* cpu_clk_unhalted.thread */
+       "event=0x3c,umask=0x0,any=1");          /* cpu_clk_unhalted.thread_any */
+EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
+EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
+       "event=0xe,umask=0x1");                 /* uops_issued.any */
+EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
+       "event=0xc2,umask=0x2");                /* uops_retired.retire_slots */
+EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
+       "event=0x9c,umask=0x1");                /* idq_uops_not_delivered_core */
+EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
+       "event=0xd,umask=0x3,cmask=1",          /* int_misc.recovery_cycles */
+       "event=0xd,umask=0x3,cmask=1,any=1");   /* int_misc.recovery_cycles_any */
+EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
+       "4", "2");
+
+static struct attribute *snb_events_attrs[] = {
        EVENT_PTR(mem_ld_snb),
        EVENT_PTR(mem_st_snb),
+       EVENT_PTR(td_slots_issued),
+       EVENT_PTR(td_slots_retired),
+       EVENT_PTR(td_fetch_bubbles),
+       EVENT_PTR(td_total_slots),
+       EVENT_PTR(td_total_slots_scale),
+       EVENT_PTR(td_recovery_bubbles),
+       EVENT_PTR(td_recovery_bubbles_scale),
        NULL,
 };
 
@@ -250,6 +304,10 @@ static struct event_constraint intel_hsw_event_constraints[] = {
        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 
+       /*
+        * When HT is off these events can only run on the bottom 4 counters
+        * When HT is on, they are impacted by the HT bug and require EXCL access
+        */
        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
@@ -258,12 +316,19 @@ static struct event_constraint intel_hsw_event_constraints[] = {
        EVENT_CONSTRAINT_END
 };
 
-struct event_constraint intel_bdw_event_constraints[] = {
+static struct event_constraint intel_bdw_event_constraints[] = {
        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
        INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),        /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
+       /*
+        * when HT is off, these can only run on the bottom 4 counters
+        */
+       INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
+       INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
        EVENT_CONSTRAINT_END
 };
 
@@ -1332,6 +1397,29 @@ static __initconst const u64 atom_hw_cache_event_ids
  },
 };
 
+EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
+EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
+/* no_alloc_cycles.not_delivered */
+EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
+              "event=0xca,umask=0x50");
+EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
+/* uops_retired.all */
+EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
+              "event=0xc2,umask=0x10");
+/* uops_retired.all */
+EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
+              "event=0xc2,umask=0x10");
+
+static struct attribute *slm_events_attrs[] = {
+       EVENT_PTR(td_total_slots_slm),
+       EVENT_PTR(td_total_slots_scale_slm),
+       EVENT_PTR(td_fetch_bubbles_slm),
+       EVENT_PTR(td_fetch_bubbles_scale_slm),
+       EVENT_PTR(td_slots_issued_slm),
+       EVENT_PTR(td_slots_retired_slm),
+       NULL
+};
+
 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
 {
        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
@@ -3261,11 +3349,11 @@ static int intel_snb_pebs_broken(int cpu)
        u32 rev = UINT_MAX; /* default to broken for unknown models */
 
        switch (cpu_data(cpu).x86_model) {
-       case 42: /* SNB */
+       case INTEL_FAM6_SANDYBRIDGE:
                rev = 0x28;
                break;
 
-       case 45: /* SNB-EP */
+       case INTEL_FAM6_SANDYBRIDGE_X:
                switch (cpu_data(cpu).x86_mask) {
                case 6: rev = 0x618; break;
                case 7: rev = 0x70c; break;
@@ -3302,6 +3390,13 @@ static void intel_snb_check_microcode(void)
        }
 }
 
+static bool is_lbr_from(unsigned long msr)
+{
+       unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
+
+       return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
+}
+
 /*
  * Under certain circumstances, access certain MSR may cause #GP.
  * The function tests if the input MSR can be safely accessed.
@@ -3322,13 +3417,24 @@ static bool check_msr(unsigned long msr, u64 mask)
         * Only change the bits which can be updated by wrmsrl.
         */
        val_tmp = val_old ^ mask;
+
+       if (is_lbr_from(msr))
+               val_tmp = lbr_from_signext_quirk_wr(val_tmp);
+
        if (wrmsrl_safe(msr, val_tmp) ||
            rdmsrl_safe(msr, &val_new))
                return false;
 
+       /*
+        * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
+        * should equal rdmsrl()'s even with the quirk.
+        */
        if (val_new != val_tmp)
                return false;
 
+       if (is_lbr_from(msr))
+               val_old = lbr_from_signext_quirk_wr(val_old);
+
        /* Here it's sure that the MSR can be safely accessed.
         * Restore the old value and return.
         */
@@ -3437,6 +3543,13 @@ static struct attribute *hsw_events_attrs[] = {
        EVENT_PTR(cycles_ct),
        EVENT_PTR(mem_ld_hsw),
        EVENT_PTR(mem_st_hsw),
+       EVENT_PTR(td_slots_issued),
+       EVENT_PTR(td_slots_retired),
+       EVENT_PTR(td_fetch_bubbles),
+       EVENT_PTR(td_total_slots),
+       EVENT_PTR(td_total_slots_scale),
+       EVENT_PTR(td_recovery_bubbles),
+       EVENT_PTR(td_recovery_bubbles_scale),
        NULL
 };
 
@@ -3508,15 +3621,15 @@ __init int intel_pmu_init(void)
         * Install the hw-cache-events table:
         */
        switch (boot_cpu_data.x86_model) {
-       case 14: /* 65nm Core "Yonah" */
+       case INTEL_FAM6_CORE_YONAH:
                pr_cont("Core events, ");
                break;
 
-       case 15: /* 65nm Core2 "Merom"          */
+       case INTEL_FAM6_CORE2_MEROM:
                x86_add_quirk(intel_clovertown_quirk);
-       case 22: /* 65nm Core2 "Merom-L"        */
-       case 23: /* 45nm Core2 "Penryn"         */
-       case 29: /* 45nm Core2 "Dunnington (MP) */
+       case INTEL_FAM6_CORE2_MEROM_L:
+       case INTEL_FAM6_CORE2_PENRYN:
+       case INTEL_FAM6_CORE2_DUNNINGTON:
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -3527,9 +3640,9 @@ __init int intel_pmu_init(void)
                pr_cont("Core2 events, ");
                break;
 
-       case 30: /* 45nm Nehalem    */
-       case 26: /* 45nm Nehalem-EP */
-       case 46: /* 45nm Nehalem-EX */
+       case INTEL_FAM6_NEHALEM:
+       case INTEL_FAM6_NEHALEM_EP:
+       case INTEL_FAM6_NEHALEM_EX:
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -3557,11 +3670,11 @@ __init int intel_pmu_init(void)
                pr_cont("Nehalem events, ");
                break;
 
-       case 28: /* 45nm Atom "Pineview"   */
-       case 38: /* 45nm Atom "Lincroft"   */
-       case 39: /* 32nm Atom "Penwell"    */
-       case 53: /* 32nm Atom "Cloverview" */
-       case 54: /* 32nm Atom "Cedarview"  */
+       case INTEL_FAM6_ATOM_PINEVIEW:
+       case INTEL_FAM6_ATOM_LINCROFT:
+       case INTEL_FAM6_ATOM_PENWELL:
+       case INTEL_FAM6_ATOM_CLOVERVIEW:
+       case INTEL_FAM6_ATOM_CEDARVIEW:
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
@@ -3573,9 +3686,9 @@ __init int intel_pmu_init(void)
                pr_cont("Atom events, ");
                break;
 
-       case 55: /* 22nm Atom "Silvermont"                */
-       case 76: /* 14nm Atom "Airmont"                   */
-       case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+       case INTEL_FAM6_ATOM_SILVERMONT1:
+       case INTEL_FAM6_ATOM_SILVERMONT2:
+       case INTEL_FAM6_ATOM_AIRMONT:
                memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
                        sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
@@ -3587,11 +3700,12 @@ __init int intel_pmu_init(void)
                x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
                x86_pmu.extra_regs = intel_slm_extra_regs;
                x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+               x86_pmu.cpu_events = slm_events_attrs;
                pr_cont("Silvermont events, ");
                break;
 
-       case 92: /* 14nm Atom "Goldmont" */
-       case 95: /* 14nm Atom "Goldmont Denverton" */
+       case INTEL_FAM6_ATOM_GOLDMONT:
+       case INTEL_FAM6_ATOM_DENVERTON:
                memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
@@ -3614,9 +3728,9 @@ __init int intel_pmu_init(void)
                pr_cont("Goldmont events, ");
                break;
 
-       case 37: /* 32nm Westmere    */
-       case 44: /* 32nm Westmere-EP */
-       case 47: /* 32nm Westmere-EX */
+       case INTEL_FAM6_WESTMERE:
+       case INTEL_FAM6_WESTMERE_EP:
+       case INTEL_FAM6_WESTMERE_EX:
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
@@ -3643,8 +3757,8 @@ __init int intel_pmu_init(void)
                pr_cont("Westmere events, ");
                break;
 
-       case 42: /* 32nm SandyBridge         */
-       case 45: /* 32nm SandyBridge-E/EN/EP */
+       case INTEL_FAM6_SANDYBRIDGE:
+       case INTEL_FAM6_SANDYBRIDGE_X:
                x86_add_quirk(intel_sandybridge_quirk);
                x86_add_quirk(intel_ht_bug);
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
@@ -3657,7 +3771,7 @@ __init int intel_pmu_init(void)
                x86_pmu.event_constraints = intel_snb_event_constraints;
                x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-               if (boot_cpu_data.x86_model == 45)
+               if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
                        x86_pmu.extra_regs = intel_snbep_extra_regs;
                else
                        x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -3679,8 +3793,8 @@ __init int intel_pmu_init(void)
                pr_cont("SandyBridge events, ");
                break;
 
-       case 58: /* 22nm IvyBridge       */
-       case 62: /* 22nm IvyBridge-EP/EX */
+       case INTEL_FAM6_IVYBRIDGE:
+       case INTEL_FAM6_IVYBRIDGE_X:
                x86_add_quirk(intel_ht_bug);
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
@@ -3696,7 +3810,7 @@ __init int intel_pmu_init(void)
                x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
                x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
                x86_pmu.pebs_prec_dist = true;
-               if (boot_cpu_data.x86_model == 62)
+               if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
                        x86_pmu.extra_regs = intel_snbep_extra_regs;
                else
                        x86_pmu.extra_regs = intel_snb_extra_regs;
@@ -3714,10 +3828,10 @@ __init int intel_pmu_init(void)
                break;
 
 
-       case 60: /* 22nm Haswell Core */
-       case 63: /* 22nm Haswell Server */
-       case 69: /* 22nm Haswell ULT */
-       case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+       case INTEL_FAM6_HASWELL_CORE:
+       case INTEL_FAM6_HASWELL_X:
+       case INTEL_FAM6_HASWELL_ULT:
+       case INTEL_FAM6_HASWELL_GT3E:
                x86_add_quirk(intel_ht_bug);
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -3741,10 +3855,10 @@ __init int intel_pmu_init(void)
                pr_cont("Haswell events, ");
                break;
 
-       case 61: /* 14nm Broadwell Core-M */
-       case 86: /* 14nm Broadwell Xeon D */
-       case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-       case 79: /* 14nm Broadwell Server */
+       case INTEL_FAM6_BROADWELL_CORE:
+       case INTEL_FAM6_BROADWELL_XEON_D:
+       case INTEL_FAM6_BROADWELL_GT3E:
+       case INTEL_FAM6_BROADWELL_X:
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -3777,7 +3891,7 @@ __init int intel_pmu_init(void)
                pr_cont("Broadwell events, ");
                break;
 
-       case 87: /* Knights Landing Xeon Phi */
+       case INTEL_FAM6_XEON_PHI_KNL:
                memcpy(hw_cache_event_ids,
                       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs,
@@ -3795,16 +3909,22 @@ __init int intel_pmu_init(void)
                pr_cont("Knights Landing events, ");
                break;
 
-       case 142: /* 14nm Kabylake Mobile */
-       case 158: /* 14nm Kabylake Desktop */
-       case 78: /* 14nm Skylake Mobile */
-       case 94: /* 14nm Skylake Desktop */
-       case 85: /* 14nm Skylake Server */
+       case INTEL_FAM6_SKYLAKE_MOBILE:
+       case INTEL_FAM6_SKYLAKE_DESKTOP:
+       case INTEL_FAM6_SKYLAKE_X:
+       case INTEL_FAM6_KABYLAKE_MOBILE:
+       case INTEL_FAM6_KABYLAKE_DESKTOP:
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
                intel_pmu_lbr_init_skl();
 
+               /* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
+               event_attr_td_recovery_bubbles.event_str_noht =
+                       "event=0xd,umask=0x1,cmask=1";
+               event_attr_td_recovery_bubbles.event_str_ht =
+                       "event=0xd,umask=0x1,cmask=1,any=1";
+
                x86_pmu.event_constraints = intel_skl_event_constraints;
                x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
                x86_pmu.extra_regs = intel_skl_extra_regs;
@@ -3885,6 +4005,8 @@ __init int intel_pmu_init(void)
                        x86_pmu.lbr_nr = 0;
        }
 
+       if (x86_pmu.lbr_nr)
+               pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
        /*
         * Access extra MSR may cause #GP under certain circumstances.
         * E.g. KVM doesn't support offcore event
@@ -3917,16 +4039,14 @@ __init int intel_pmu_init(void)
  */
 static __init int fixup_ht_bug(void)
 {
-       int cpu = smp_processor_id();
-       int w, c;
+       int c;
        /*
         * problem not present on this CPU model, nothing to do
         */
        if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
                return 0;
 
-       w = cpumask_weight(topology_sibling_cpumask(cpu));
-       if (w > 1) {
+       if (topology_max_smt_threads() > 1) {
                pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
                return 0;
        }
index 9ba4e4136a1539ba791052b726c12343694ea85f..4c7638b91fa56ea2dde92d50a818a3d8d3b7d1da 100644 (file)
@@ -89,6 +89,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include "../perf_event.h"
 
 MODULE_LICENSE("GPL");
@@ -511,37 +512,37 @@ static const struct cstate_model slm_cstates __initconst = {
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }
 
 static const struct x86_cpu_id intel_cstates_match[] __initconst = {
-       X86_CSTATES_MODEL(30, nhm_cstates),    /* 45nm Nehalem              */
-       X86_CSTATES_MODEL(26, nhm_cstates),    /* 45nm Nehalem-EP           */
-       X86_CSTATES_MODEL(46, nhm_cstates),    /* 45nm Nehalem-EX           */
+       X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),
 
-       X86_CSTATES_MODEL(37, nhm_cstates),    /* 32nm Westmere             */
-       X86_CSTATES_MODEL(44, nhm_cstates),    /* 32nm Westmere-EP          */
-       X86_CSTATES_MODEL(47, nhm_cstates),    /* 32nm Westmere-EX          */
+       X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),
 
-       X86_CSTATES_MODEL(42, snb_cstates),    /* 32nm SandyBridge          */
-       X86_CSTATES_MODEL(45, snb_cstates),    /* 32nm SandyBridge-E/EN/EP  */
+       X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),
 
-       X86_CSTATES_MODEL(58, snb_cstates),    /* 22nm IvyBridge            */
-       X86_CSTATES_MODEL(62, snb_cstates),    /* 22nm IvyBridge-EP/EX      */
+       X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),
 
-       X86_CSTATES_MODEL(60, snb_cstates),    /* 22nm Haswell Core         */
-       X86_CSTATES_MODEL(63, snb_cstates),    /* 22nm Haswell Server       */
-       X86_CSTATES_MODEL(70, snb_cstates),    /* 22nm Haswell + GT3e       */
+       X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,    snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),
 
-       X86_CSTATES_MODEL(69, hswult_cstates), /* 22nm Haswell ULT          */
+       X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),
 
-       X86_CSTATES_MODEL(55, slm_cstates),    /* 22nm Atom Silvermont      */
-       X86_CSTATES_MODEL(77, slm_cstates),    /* 22nm Atom Avoton/Rangely  */
-       X86_CSTATES_MODEL(76, slm_cstates),    /* 22nm Atom Airmont         */
+       X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,     slm_cstates),
 
-       X86_CSTATES_MODEL(61, snb_cstates),    /* 14nm Broadwell Core-M     */
-       X86_CSTATES_MODEL(86, snb_cstates),    /* 14nm Broadwell Xeon D     */
-       X86_CSTATES_MODEL(71, snb_cstates),    /* 14nm Broadwell + GT3e     */
-       X86_CSTATES_MODEL(79, snb_cstates),    /* 14nm Broadwell Server     */
+       X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),
 
-       X86_CSTATES_MODEL(78, snb_cstates),    /* 14nm Skylake Mobile       */
-       X86_CSTATES_MODEL(94, snb_cstates),    /* 14nm Skylake Desktop      */
+       X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
        { },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
index 9e2b40cdb05f8c68061e43a14b04aa3aace3429c..707d358e0dff59ed5871bc49ceda0462c4f818be 100644 (file)
@@ -77,9 +77,11 @@ static enum {
         LBR_IND_JMP    |\
         LBR_FAR)
 
-#define LBR_FROM_FLAG_MISPRED  (1ULL << 63)
-#define LBR_FROM_FLAG_IN_TX    (1ULL << 62)
-#define LBR_FROM_FLAG_ABORT    (1ULL << 61)
+#define LBR_FROM_FLAG_MISPRED  BIT_ULL(63)
+#define LBR_FROM_FLAG_IN_TX    BIT_ULL(62)
+#define LBR_FROM_FLAG_ABORT    BIT_ULL(61)
+
+#define LBR_FROM_SIGNEXT_2MSB  (BIT_ULL(60) | BIT_ULL(59))
 
 /*
  * x86control flow change classification
@@ -235,6 +237,97 @@ enum {
        LBR_VALID,
 };
 
+/*
+ * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
+ * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
+ * TSX is not supported they have no consistent behavior:
+ *
+ *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
+ *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
+ *     part of the sign extension.
+ *
+ * Therefore, if:
+ *
+ *   1) LBR has TSX format
+ *   2) CPU has no TSX support enabled
+ *
+ * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
+ * value from rdmsr() must be converted to have a 61 bits sign extension,
+ * ignoring the TSX flags.
+ */
+static inline bool lbr_from_signext_quirk_needed(void)
+{
+       int lbr_format = x86_pmu.intel_cap.lbr_format;
+       bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
+                          boot_cpu_has(X86_FEATURE_RTM);
+
+       return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
+}
+
+DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
+
+/* If quirk is enabled, ensure sign extension is 63 bits: */
+inline u64 lbr_from_signext_quirk_wr(u64 val)
+{
+       if (static_branch_unlikely(&lbr_from_quirk_key)) {
+               /*
+                * Sign extend into bits 61:62 while preserving bit 63.
+                *
+                * Quirk is enabled when TSX is disabled. Therefore TSX bits
+                * in val are always OFF and must be changed to be sign
+                * extension bits. Since bits 59:60 are guaranteed to be
+                * part of the sign extension bits, we can just copy them
+                * to 61:62.
+                */
+               val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
+       }
+       return val;
+}
+
+/*
+ * If quirk is needed, ensure sign extension is 61 bits:
+ */
+u64 lbr_from_signext_quirk_rd(u64 val)
+{
+       if (static_branch_unlikely(&lbr_from_quirk_key)) {
+               /*
+                * Quirk is on when TSX is not enabled. Therefore TSX
+                * flags must be read as OFF.
+                */
+               val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
+       }
+       return val;
+}
+
+static inline void wrlbr_from(unsigned int idx, u64 val)
+{
+       val = lbr_from_signext_quirk_wr(val);
+       wrmsrl(x86_pmu.lbr_from + idx, val);
+}
+
+static inline void wrlbr_to(unsigned int idx, u64 val)
+{
+       wrmsrl(x86_pmu.lbr_to + idx, val);
+}
+
+static inline u64 rdlbr_from(unsigned int idx)
+{
+       u64 val;
+
+       rdmsrl(x86_pmu.lbr_from + idx, val);
+
+       return lbr_from_signext_quirk_rd(val);
+}
+
+static inline u64 rdlbr_to(unsigned int idx)
+{
+       u64 val;
+
+       rdmsrl(x86_pmu.lbr_to + idx, val);
+
+       return val;
+}
+
 static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 {
        int i;
@@ -251,8 +344,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
        tos = task_ctx->tos;
        for (i = 0; i < tos; i++) {
                lbr_idx = (tos - i) & mask;
-               wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
-               wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+               wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
+               wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
+
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
@@ -262,9 +356,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 
 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
-       int i;
        unsigned lbr_idx, mask;
        u64 tos;
+       int i;
 
        if (task_ctx->lbr_callstack_users == 0) {
                task_ctx->lbr_stack_state = LBR_NONE;
@@ -275,8 +369,8 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
        tos = intel_pmu_lbr_tos();
        for (i = 0; i < tos; i++) {
                lbr_idx = (tos - i) & mask;
-               rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
-               rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+               task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+               task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
                if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                        rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
        }
@@ -452,8 +546,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
                u16 cycles = 0;
                int lbr_flags = lbr_desc[lbr_format];
 
-               rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
-               rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);
+               from = rdlbr_from(lbr_idx);
+               to   = rdlbr_to(lbr_idx);
 
                if (lbr_format == LBR_FORMAT_INFO && need_info) {
                        u64 info;
@@ -956,7 +1050,6 @@ void __init intel_pmu_lbr_init_core(void)
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
-       pr_cont("4-deep LBR, ");
 }
 
 /* nehalem/westmere */
@@ -977,7 +1070,6 @@ void __init intel_pmu_lbr_init_nhm(void)
         *   That requires LBR_FAR but that means far
         *   jmp need to be filtered out
         */
-       pr_cont("16-deep LBR, ");
 }
 
 /* sandy bridge */
@@ -997,7 +1089,6 @@ void __init intel_pmu_lbr_init_snb(void)
         *   That requires LBR_FAR but that means far
         *   jmp need to be filtered out
         */
-       pr_cont("16-deep LBR, ");
 }
 
 /* haswell */
@@ -1011,7 +1102,8 @@ void intel_pmu_lbr_init_hsw(void)
        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;
 
-       pr_cont("16-deep LBR, ");
+       if (lbr_from_signext_quirk_needed())
+               static_branch_enable(&lbr_from_quirk_key);
 }
 
 /* skylake */
@@ -1031,7 +1123,6 @@ __init void intel_pmu_lbr_init_skl(void)
         *   That requires LBR_FAR but that means far
         *   jmp need to be filtered out
         */
-       pr_cont("32-deep LBR, ");
 }
 
 /* atom */
@@ -1057,7 +1148,6 @@ void __init intel_pmu_lbr_init_atom(void)
         * SW branch filter usage:
         * - compensate for lack of HW filter
         */
-       pr_cont("8-deep LBR, ");
 }
 
 /* slm */
@@ -1088,6 +1178,4 @@ void intel_pmu_lbr_init_knl(void)
 
        x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
        x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
-
-       pr_cont("8-deep LBR, ");
 }
index e30eef4f29a6f6034e46beab4a559f9994311c34..d0c58b35155f1721b59f6b935a65df9dfe814a65 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include "../perf_event.h"
 
 MODULE_LICENSE("GPL");
@@ -786,26 +787,27 @@ static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
 };
 
 static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
-       X86_RAPL_MODEL_MATCH(42, snb_rapl_init),        /* Sandy Bridge */
-       X86_RAPL_MODEL_MATCH(45, snbep_rapl_init),      /* Sandy Bridge-EP */
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,   snb_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_rapl_init),
 
-       X86_RAPL_MODEL_MATCH(58, snb_rapl_init),        /* Ivy Bridge */
-       X86_RAPL_MODEL_MATCH(62, snbep_rapl_init),      /* IvyTown */
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,   snb_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),
 
-       X86_RAPL_MODEL_MATCH(60, hsw_rapl_init),        /* Haswell */
-       X86_RAPL_MODEL_MATCH(63, hsx_rapl_init),        /* Haswell-Server */
-       X86_RAPL_MODEL_MATCH(69, hsw_rapl_init),        /* Haswell-Celeron */
-       X86_RAPL_MODEL_MATCH(70, hsw_rapl_init),        /* Haswell GT3e */
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,    hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,  hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),
 
-       X86_RAPL_MODEL_MATCH(61, hsw_rapl_init),        /* Broadwell */
-       X86_RAPL_MODEL_MATCH(71, hsw_rapl_init),        /* Broadwell-H */
-       X86_RAPL_MODEL_MATCH(79, hsx_rapl_init),        /* Broadwell-Server */
-       X86_RAPL_MODEL_MATCH(86, hsx_rapl_init),        /* Broadwell Xeon D */
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsw_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
 
-       X86_RAPL_MODEL_MATCH(87, knl_rapl_init),        /* Knights Landing */
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
 
-       X86_RAPL_MODEL_MATCH(78, skl_rapl_init),        /* Skylake */
-       X86_RAPL_MODEL_MATCH(94, skl_rapl_init),        /* Skylake H/S */
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
+       X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,       hsx_rapl_init),
        {},
 };
 
index fce74062d9812031b491d997f1e995578ccf8af9..59b4974c697fbc169e1c74929949258a4e9f7503 100644 (file)
@@ -1,4 +1,5 @@
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include "uncore.h"
 
 static struct intel_uncore_type *empty_uncore[] = { NULL, };
@@ -882,7 +883,7 @@ uncore_types_init(struct intel_uncore_type **types, bool setid)
 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct intel_uncore_type *type;
-       struct intel_uncore_pmu *pmu;
+       struct intel_uncore_pmu *pmu = NULL;
        struct intel_uncore_box *box;
        int phys_id, pkg, ret;
 
@@ -903,20 +904,37 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
        }
 
        type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
+
        /*
-        * for performance monitoring unit with multiple boxes,
-        * each box has a different function id.
-        */
-       pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
-       /* Knights Landing uses a common PCI device ID for multiple instances of
-        * an uncore PMU device type. There is only one entry per device type in
-        * the knl_uncore_pci_ids table inspite of multiple devices present for
-        * some device types. Hence PCI device idx would be 0 for all devices.
-        * So increment pmu pointer to point to an unused array element.
+        * Some platforms, e.g.  Knights Landing, use a common PCI device ID
+        * for multiple instances of an uncore PMU device type. We should check
+        * PCI slot and func to indicate the uncore box.
         */
-       if (boot_cpu_data.x86_model == 87) {
-               while (pmu->func_id >= 0)
-                       pmu++;
+       if (id->driver_data & ~0xffff) {
+               struct pci_driver *pci_drv = pdev->driver;
+               const struct pci_device_id *ids = pci_drv->id_table;
+               unsigned int devfn;
+
+               while (ids && ids->vendor) {
+                       if ((ids->vendor == pdev->vendor) &&
+                           (ids->device == pdev->device)) {
+                               devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
+                                                 UNCORE_PCI_DEV_FUNC(ids->driver_data));
+                               if (devfn == pdev->devfn) {
+                                       pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
+                                       break;
+                               }
+                       }
+                       ids++;
+               }
+               if (pmu == NULL)
+                       return -ENODEV;
+       } else {
+               /*
+                * for performance monitoring unit with multiple boxes,
+                * each box has a different function id.
+                */
+               pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
        }
 
        if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))
@@ -956,7 +974,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
 
 static void uncore_pci_remove(struct pci_dev *pdev)
 {
-       struct intel_uncore_box *box = pci_get_drvdata(pdev);
+       struct intel_uncore_box *box;
        struct intel_uncore_pmu *pmu;
        int i, phys_id, pkg;
 
@@ -1361,30 +1379,32 @@ static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
 };
 
 static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
+       .cpu_init = skl_uncore_cpu_init,
        .pci_init = skl_uncore_pci_init,
 };
 
 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
-       X86_UNCORE_MODEL_MATCH(26, nhm_uncore_init),    /* Nehalem */
-       X86_UNCORE_MODEL_MATCH(30, nhm_uncore_init),
-       X86_UNCORE_MODEL_MATCH(37, nhm_uncore_init),    /* Westmere */
-       X86_UNCORE_MODEL_MATCH(44, nhm_uncore_init),
-       X86_UNCORE_MODEL_MATCH(42, snb_uncore_init),    /* Sandy Bridge */
-       X86_UNCORE_MODEL_MATCH(58, ivb_uncore_init),    /* Ivy Bridge */
-       X86_UNCORE_MODEL_MATCH(60, hsw_uncore_init),    /* Haswell */
-       X86_UNCORE_MODEL_MATCH(69, hsw_uncore_init),    /* Haswell Celeron */
-       X86_UNCORE_MODEL_MATCH(70, hsw_uncore_init),    /* Haswell */
-       X86_UNCORE_MODEL_MATCH(61, bdw_uncore_init),    /* Broadwell */
-       X86_UNCORE_MODEL_MATCH(71, bdw_uncore_init),    /* Broadwell */
-       X86_UNCORE_MODEL_MATCH(45, snbep_uncore_init),  /* Sandy Bridge-EP */
-       X86_UNCORE_MODEL_MATCH(46, nhmex_uncore_init),  /* Nehalem-EX */
-       X86_UNCORE_MODEL_MATCH(47, nhmex_uncore_init),  /* Westmere-EX aka. Xeon E7 */
-       X86_UNCORE_MODEL_MATCH(62, ivbep_uncore_init),  /* Ivy Bridge-EP */
-       X86_UNCORE_MODEL_MATCH(63, hswep_uncore_init),  /* Haswell-EP */
-       X86_UNCORE_MODEL_MATCH(79, bdx_uncore_init),    /* BDX-EP */
-       X86_UNCORE_MODEL_MATCH(86, bdx_uncore_init),    /* BDX-DE */
-       X86_UNCORE_MODEL_MATCH(87, knl_uncore_init),    /* Knights Landing */
-       X86_UNCORE_MODEL_MATCH(94, skl_uncore_init),    /* SkyLake */
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,     nhm_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,        nhm_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,       nhm_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,    nhm_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,    snb_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,      ivb_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,   hsw_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,    hsw_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,   hsw_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE, bdw_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E, bdw_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,  snbep_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,     nhmex_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,    nhmex_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,    ivbep_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,      hswep_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,    bdx_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,   knl_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
+       X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
        {},
 };
 
index 79766b9a35809fd072849f09b2b16ec0ea3ee901..d6063e438158494e90895e8274cef6b86c056c8c 100644 (file)
 #define UNCORE_PMC_IDX_FIXED           UNCORE_PMC_IDX_MAX_GENERIC
 #define UNCORE_PMC_IDX_MAX             (UNCORE_PMC_IDX_FIXED + 1)
 
+#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx) \
+               ((dev << 24) | (func << 16) | (type << 8) | idx)
 #define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx)
+#define UNCORE_PCI_DEV_DEV(data)       ((data >> 24) & 0xff)
+#define UNCORE_PCI_DEV_FUNC(data)      ((data >> 16) & 0xff)
 #define UNCORE_PCI_DEV_TYPE(data)      ((data >> 8) & 0xff)
 #define UNCORE_PCI_DEV_IDX(data)       (data & 0xff)
 #define UNCORE_EXTRA_PCI_DEV           0xff
@@ -360,6 +364,7 @@ int bdw_uncore_pci_init(void);
 int skl_uncore_pci_init(void);
 void snb_uncore_cpu_init(void);
 void nhm_uncore_cpu_init(void);
+void skl_uncore_cpu_init(void);
 int snb_pci2phy_map_init(int devid);
 
 /* perf_event_intel_uncore_snbep.c */
index 96531d2b843fc99b9e9060574c5bc1f9df44db82..97a69dbba649b6bb7c6dd936440f572c5d02ed6f 100644 (file)
@@ -1,4 +1,4 @@
-/* Nehalem/SandBridge/Haswell uncore support */
+/* Nehalem/SandBridge/Haswell/Broadwell/Skylake uncore support */
 #include "uncore.h"
 
 /* Uncore IMC PCI IDs */
@@ -9,6 +9,7 @@
 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC  0x0a04
 #define PCI_DEVICE_ID_INTEL_BDW_IMC    0x1604
 #define PCI_DEVICE_ID_INTEL_SKL_IMC    0x191f
+#define PCI_DEVICE_ID_INTEL_SKL_U_IMC  0x190c
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
 #define NHM_UNC_PERFEVTSEL0                     0x3c0
 #define NHM_UNC_UNCORE_PMC0                     0x3b0
 
+/* SKL uncore global control */
+#define SKL_UNC_PERF_GLOBAL_CTL                        0xe01
+#define SKL_UNC_GLOBAL_CTL_CORE_ALL            ((1 << 5) - 1)
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
@@ -179,6 +184,60 @@ void snb_uncore_cpu_init(void)
                snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
 }
 
+static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+       if (box->pmu->pmu_idx == 0) {
+               wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
+                       SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
+       }
+}
+
+static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+       if (box->pmu->pmu_idx == 0)
+               wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static struct intel_uncore_ops skl_uncore_msr_ops = {
+       .init_box       = skl_uncore_msr_init_box,
+       .exit_box       = skl_uncore_msr_exit_box,
+       .disable_event  = snb_uncore_msr_disable_event,
+       .enable_event   = snb_uncore_msr_enable_event,
+       .read_counter   = uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type skl_uncore_cbox = {
+       .name           = "cbox",
+       .num_counters   = 4,
+       .num_boxes      = 5,
+       .perf_ctr_bits  = 44,
+       .fixed_ctr_bits = 48,
+       .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
+       .event_ctl      = SNB_UNC_CBO_0_PERFEVTSEL0,
+       .fixed_ctr      = SNB_UNC_FIXED_CTR,
+       .fixed_ctl      = SNB_UNC_FIXED_CTR_CTRL,
+       .single_fixed   = 1,
+       .event_mask     = SNB_UNC_RAW_EVENT_MASK,
+       .msr_offset     = SNB_UNC_CBO_MSR_OFFSET,
+       .ops            = &skl_uncore_msr_ops,
+       .format_group   = &snb_uncore_format_group,
+       .event_descs    = snb_uncore_events,
+};
+
+static struct intel_uncore_type *skl_msr_uncores[] = {
+       &skl_uncore_cbox,
+       &snb_uncore_arb,
+       NULL,
+};
+
+void skl_uncore_cpu_init(void)
+{
+       uncore_msr_uncores = skl_msr_uncores;
+       if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+               skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+       snb_uncore_arb.ops = &skl_uncore_msr_ops;
+}
+
 enum {
        SNB_PCI_UNCORE_IMC,
 };
@@ -544,6 +603,11 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+
        { /* end: all zeroes */ },
 };
 
@@ -587,6 +651,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
        IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
        IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
+       IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
        {  /* end marker */ }
 };
 
index 874e8bd64d1d54cdf662c5ba348f8872a0265e24..824e54086e071456b170380c52e561dbbac62cf7 100644 (file)
@@ -2164,21 +2164,101 @@ static struct intel_uncore_type *knl_pci_uncores[] = {
 */
 
 static const struct pci_device_id knl_uncore_pci_ids[] = {
-       { /* MC UClk */
+       { /* MC0 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
-               .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_UCLK, 0),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
        },
-       { /* MC DClk Channel */
+       { /* MC1 UClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
+       },
+       { /* MC0 DClk CH 0 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
+       },
+       { /* MC0 DClk CH 1 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
+       },
+       { /* MC0 DClk CH 2 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
+       },
+       { /* MC1 DClk CH 0 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
+       },
+       { /* MC1 DClk CH 1 */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
-               .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_DCLK, 0),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
+       },
+       { /* MC1 DClk CH 2 */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
+       },
+       { /* EDC0 UClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
+       },
+       { /* EDC1 UClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
+       },
+       { /* EDC2 UClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
+       },
+       { /* EDC3 UClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
        },
-       { /* EDC UClk */
+       { /* EDC4 UClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
-               .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_UCLK, 0),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
+       },
+       { /* EDC5 UClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
+       },
+       { /* EDC6 UClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
+       },
+       { /* EDC7 UClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
+       },
+       { /* EDC0 EClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
+       },
+       { /* EDC1 EClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
+       },
+       { /* EDC2 EClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
+       },
+       { /* EDC3 EClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
+       },
+       { /* EDC4 EClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
+       },
+       { /* EDC5 EClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
+       },
+       { /* EDC6 EClk */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
        },
-       { /* EDC EClk */
+       { /* EDC7 EClk */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
-               .driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_ECLK, 0),
+               .driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
        },
        { /* M2PCIe */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
index 85ef3c2e80e0450350f347bfa08e3036c459e857..50b3a056f96b141cc4f672de735a2accb2af28cd 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <asm/intel-family.h>
 
 enum perf_msr_id {
        PERF_MSR_TSC                    = 0,
@@ -34,39 +35,43 @@ static bool test_intel(int idx)
                return false;
 
        switch (boot_cpu_data.x86_model) {
-       case 30: /* 45nm Nehalem    */
-       case 26: /* 45nm Nehalem-EP */
-       case 46: /* 45nm Nehalem-EX */
-
-       case 37: /* 32nm Westmere    */
-       case 44: /* 32nm Westmere-EP */
-       case 47: /* 32nm Westmere-EX */
-
-       case 42: /* 32nm SandyBridge         */
-       case 45: /* 32nm SandyBridge-E/EN/EP */
-
-       case 58: /* 22nm IvyBridge       */
-       case 62: /* 22nm IvyBridge-EP/EX */
-
-       case 60: /* 22nm Haswell Core */
-       case 63: /* 22nm Haswell Server */
-       case 69: /* 22nm Haswell ULT */
-       case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
-
-       case 61: /* 14nm Broadwell Core-M */
-       case 86: /* 14nm Broadwell Xeon D */
-       case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-       case 79: /* 14nm Broadwell Server */
-
-       case 55: /* 22nm Atom "Silvermont"                */
-       case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
-       case 76: /* 14nm Atom "Airmont"                   */
+       case INTEL_FAM6_NEHALEM:
+       case INTEL_FAM6_NEHALEM_EP:
+       case INTEL_FAM6_NEHALEM_EX:
+
+       case INTEL_FAM6_WESTMERE:
+       case INTEL_FAM6_WESTMERE2:
+       case INTEL_FAM6_WESTMERE_EP:
+       case INTEL_FAM6_WESTMERE_EX:
+
+       case INTEL_FAM6_SANDYBRIDGE:
+       case INTEL_FAM6_SANDYBRIDGE_X:
+
+       case INTEL_FAM6_IVYBRIDGE:
+       case INTEL_FAM6_IVYBRIDGE_X:
+
+       case INTEL_FAM6_HASWELL_CORE:
+       case INTEL_FAM6_HASWELL_X:
+       case INTEL_FAM6_HASWELL_ULT:
+       case INTEL_FAM6_HASWELL_GT3E:
+
+       case INTEL_FAM6_BROADWELL_CORE:
+       case INTEL_FAM6_BROADWELL_XEON_D:
+       case INTEL_FAM6_BROADWELL_GT3E:
+       case INTEL_FAM6_BROADWELL_X:
+
+       case INTEL_FAM6_ATOM_SILVERMONT1:
+       case INTEL_FAM6_ATOM_SILVERMONT2:
+       case INTEL_FAM6_ATOM_AIRMONT:
                if (idx == PERF_MSR_SMI)
                        return true;
                break;
 
-       case 78: /* 14nm Skylake Mobile */
-       case 94: /* 14nm Skylake Desktop */
+       case INTEL_FAM6_SKYLAKE_MOBILE:
+       case INTEL_FAM6_SKYLAKE_DESKTOP:
+       case INTEL_FAM6_SKYLAKE_X:
+       case INTEL_FAM6_KABYLAKE_MOBILE:
+       case INTEL_FAM6_KABYLAKE_DESKTOP:
                if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
                        return true;
                break;
index 8bd764df815d36287ca4b29effb969ede50735a6..8c4a47706296ab49a2c3825c6312e207e608bdeb 100644 (file)
@@ -668,6 +668,14 @@ static struct perf_pmu_events_attr event_attr_##v = {                      \
        .event_str      = str,                                          \
 };
 
+#define EVENT_ATTR_STR_HT(_name, v, noht, ht)                          \
+static struct perf_pmu_events_ht_attr event_attr_##v = {               \
+       .attr           = __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
+       .id             = 0,                                            \
+       .event_str_noht = noht,                                         \
+       .event_str_ht   = ht,                                           \
+}
+
 extern struct x86_pmu x86_pmu __read_mostly;
 
 static inline bool x86_pmu_has_lbr_callstack(void)
@@ -803,6 +811,8 @@ struct attribute **merge_attr(struct attribute **a, struct attribute **b);
 
 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
                          char *page);
+ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
+                         char *page);
 
 #ifdef CONFIG_CPU_SUP_AMD
 
@@ -892,6 +902,8 @@ void intel_ds_init(void);
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
 
+u64 lbr_from_signext_quirk_wr(u64 val);
+
 void intel_pmu_lbr_reset(void);
 
 void intel_pmu_lbr_enable(struct perf_event *event);
index aeac434c9febd6e6cd4b5c595288e4b2eb2c9f12..2cfed174e3c9fda7e5d8d642ad72a8f2b58a751c 100644 (file)
@@ -1,5 +1,11 @@
 
 
+generated-y += syscalls_32.h
+generated-y += syscalls_64.h
+generated-y += unistd_32_ia32.h
+generated-y += unistd_64_x32.h
+generated-y += xen-hypercalls.h
+
 genhdr-y += unistd_32.h
 genhdr-y += unistd_64.h
 genhdr-y += unistd_x32.h
index bc27611fa58f1b0b522beefb3117afb1f5c250e6..f5befd4945f2a84d4123ce47d2c68d5bab1a0a88 100644 (file)
@@ -300,7 +300,6 @@ struct apic {
 
        unsigned int (*get_apic_id)(unsigned long x);
        unsigned long (*set_apic_id)(unsigned int id);
-       unsigned long apic_id_mask;
 
        int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
                                      const struct cpumask *andmask,
index 20370c6db74bb6dd09bbb78d4ded0b7adfd27d3e..93eebc636c7616395ffa5b55f81c33e9b35dbb71 100644 (file)
@@ -45,11 +45,11 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
                : "memory", "cc");
 }
 
-static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
-                                               u32 ecx_in, u32 *eax)
+static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+                                           u32 ecx_in, u32 *eax)
 {
        int     cx, dx, si;
-       u8      error;
+       bool    error;
 
        /*
         * N.B. We do NOT need a cld after the BIOS call
index 02e799fa43d1b19c878290f6424e1b2f7293074d..e7cd63175de443f68aa536237a82a92120ca08f3 100644 (file)
@@ -4,8 +4,8 @@
 #include <asm/cpufeatures.h>
 
 #ifdef CONFIG_64BIT
-/* popcnt %edi, %eax -- redundant REX prefix for alignment */
-#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
+/* popcnt %edi, %eax */
+#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
 /* popcnt %rdi, %rax */
 #define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
 #define REG_IN "D"
 #define REG_OUT "a"
 #endif
 
-/*
- * __sw_hweightXX are called from within the alternatives below
- * and callee-clobbered registers need to be taken care of. See
- * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
- * compiler switches.
- */
+#define __HAVE_ARCH_SW_HWEIGHT
+
 static __always_inline unsigned int __arch_hweight32(unsigned int w)
 {
-       unsigned int res = 0;
+       unsigned int res;
 
        asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
-                    : "="REG_OUT (res)
-                    : REG_IN (w));
+                        : "="REG_OUT (res)
+                        : REG_IN (w));
 
        return res;
 }
@@ -53,11 +49,11 @@ static inline unsigned long __arch_hweight64(__u64 w)
 #else
 static __always_inline unsigned long __arch_hweight64(__u64 w)
 {
-       unsigned long res = 0;
+       unsigned long res;
 
        asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
-                    : "="REG_OUT (res)
-                    : REG_IN (w));
+                        : "="REG_OUT (res)
+                        : REG_IN (w));
 
        return res;
 }
index 69f1366f1aa33721ea160cc831a1345867b7a4b3..5b0579abb39829ce3c6a1b4f00e73dd9295c26dc 100644 (file)
@@ -25,8 +25,6 @@
 
 #include <asm/processor.h>
 #include <asm/cpufeature.h>
-#include <asm/alternative.h>
-#include <asm/nops.h>
 
 #define RDRAND_RETRY_LOOPS     10
 
 # define RDSEED_LONG   RDSEED_INT
 #endif
 
-#ifdef CONFIG_ARCH_RANDOM
+/* Unconditional execution of RDRAND and RDSEED */
 
-/* Instead of arch_get_random_long() when alternatives haven't run. */
-static inline int rdrand_long(unsigned long *v)
+static inline bool rdrand_long(unsigned long *v)
 {
-       int ok;
-       asm volatile("1: " RDRAND_LONG "\n\t"
-                    "jc 2f\n\t"
-                    "decl %0\n\t"
-                    "jnz 1b\n\t"
-                    "2:"
-                    : "=r" (ok), "=a" (*v)
-                    : "0" (RDRAND_RETRY_LOOPS));
-       return ok;
+       bool ok;
+       unsigned int retry = RDRAND_RETRY_LOOPS;
+       do {
+               asm volatile(RDRAND_LONG "\n\t"
+                            CC_SET(c)
+                            : CC_OUT(c) (ok), "=a" (*v));
+               if (ok)
+                       return true;
+       } while (--retry);
+       return false;
+}
+
+static inline bool rdrand_int(unsigned int *v)
+{
+       bool ok;
+       unsigned int retry = RDRAND_RETRY_LOOPS;
+       do {
+               asm volatile(RDRAND_INT "\n\t"
+                            CC_SET(c)
+                            : CC_OUT(c) (ok), "=a" (*v));
+               if (ok)
+                       return true;
+       } while (--retry);
+       return false;
 }
 
-/* A single attempt at RDSEED */
 static inline bool rdseed_long(unsigned long *v)
 {
-       unsigned char ok;
+       bool ok;
        asm volatile(RDSEED_LONG "\n\t"
-                    "setc %0"
-                    : "=qm" (ok), "=a" (*v));
+                    CC_SET(c)
+                    : CC_OUT(c) (ok), "=a" (*v));
        return ok;
 }
 
-#define GET_RANDOM(name, type, rdrand, nop)                    \
-static inline int name(type *v)                                        \
-{                                                              \
-       int ok;                                                 \
-       alternative_io("movl $0, %0\n\t"                        \
-                      nop,                                     \
-                      "\n1: " rdrand "\n\t"                    \
-                      "jc 2f\n\t"                              \
-                      "decl %0\n\t"                            \
-                      "jnz 1b\n\t"                             \
-                      "2:",                                    \
-                      X86_FEATURE_RDRAND,                      \
-                      ASM_OUTPUT2("=r" (ok), "=a" (*v)),       \
-                      "0" (RDRAND_RETRY_LOOPS));               \
-       return ok;                                              \
-}
-
-#define GET_SEED(name, type, rdseed, nop)                      \
-static inline int name(type *v)                                        \
-{                                                              \
-       unsigned char ok;                                       \
-       alternative_io("movb $0, %0\n\t"                        \
-                      nop,                                     \
-                      rdseed "\n\t"                            \
-                      "setc %0",                               \
-                      X86_FEATURE_RDSEED,                      \
-                      ASM_OUTPUT2("=q" (ok), "=a" (*v)));      \
-       return ok;                                              \
+static inline bool rdseed_int(unsigned int *v)
+{
+       bool ok;
+       asm volatile(RDSEED_INT "\n\t"
+                    CC_SET(c)
+                    : CC_OUT(c) (ok), "=a" (*v));
+       return ok;
 }
 
-#ifdef CONFIG_X86_64
-
-GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
-GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
-
-GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP5);
-GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
-
-#else
-
-GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
-GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
-
-GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP4);
-GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4);
-
-#endif /* CONFIG_X86_64 */
-
+/* Conditional execution based on CPU type */
 #define arch_has_random()      static_cpu_has(X86_FEATURE_RDRAND)
 #define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED)
 
-#else
+/*
+ * These are the generic interfaces; they must not be declared if the
+ * stubs in <linux/random.h> are to be invoked,
+ * i.e. CONFIG_ARCH_RANDOM is not defined.
+ */
+#ifdef CONFIG_ARCH_RANDOM
 
-static inline int rdrand_long(unsigned long *v)
+static inline bool arch_get_random_long(unsigned long *v)
 {
-       return 0;
+       return arch_has_random() ? rdrand_long(v) : false;
 }
 
-static inline bool rdseed_long(unsigned long *v)
+static inline bool arch_get_random_int(unsigned int *v)
 {
-       return 0;
+       return arch_has_random() ? rdrand_int(v) : false;
 }
 
-#endif  /* CONFIG_ARCH_RANDOM */
+static inline bool arch_get_random_seed_long(unsigned long *v)
+{
+       return arch_has_random_seed() ? rdseed_long(v) : false;
+}
+
+static inline bool arch_get_random_seed_int(unsigned int *v)
+{
+       return arch_has_random_seed() ? rdseed_int(v) : false;
+}
 
 extern void x86_init_rdrand(struct cpuinfo_x86 *c);
 
+#else  /* !CONFIG_ARCH_RANDOM */
+
+static inline void x86_init_rdrand(struct cpuinfo_x86 *c) { }
+
+#endif  /* !CONFIG_ARCH_RANDOM */
+
 #endif /* ASM_X86_ARCHRANDOM_H */
index f5063b6659eb37f1a835bc5e614cf251ac48e032..7acb51c49fec46220052c94c5f6a4a308f4a1fc9 100644 (file)
 #define _ASM_SI                __ASM_REG(si)
 #define _ASM_DI                __ASM_REG(di)
 
+/*
+ * Macros to generate condition code outputs from inline assembly,
+ * The output operand must be type "bool".
+ */
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
+# define CC_OUT(c) "=@cc" #c
+#else
+# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
+# define CC_OUT(c) [_cc_ ## c] "=qm"
+#endif
+
 /* Exception table entry */
 #ifdef __ASSEMBLY__
 # define _ASM_EXTABLE_HANDLE(from, to, handler)                        \
index 3e86742881984006555fa433d9988a2c4eaa7b4f..14635c5ea025138ba49cf5a4305c990c60f1ac6c 100644 (file)
@@ -75,9 +75,9 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
+static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
 }
 
 /**
@@ -112,9 +112,9 @@ static __always_inline void atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static __always_inline int atomic_dec_and_test(atomic_t *v)
+static __always_inline bool atomic_dec_and_test(atomic_t *v)
 {
-       GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
+       GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
 }
 
 /**
@@ -125,9 +125,9 @@ static __always_inline int atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static __always_inline int atomic_inc_and_test(atomic_t *v)
+static __always_inline bool atomic_inc_and_test(atomic_t *v)
 {
-       GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
+       GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
 }
 
 /**
@@ -139,9 +139,9 @@ static __always_inline int atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static __always_inline int atomic_add_negative(int i, atomic_t *v)
+static __always_inline bool atomic_add_negative(int i, atomic_t *v)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
 }
 
 /**
@@ -171,6 +171,16 @@ static __always_inline int atomic_sub_return(int i, atomic_t *v)
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
+static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+{
+       return xadd(&v->counter, i);
+}
+
+static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+       return xadd(&v->counter, -i);
+}
+
 static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        return cmpxchg(&v->counter, old, new);
@@ -190,10 +200,29 @@ static inline void atomic_##op(int i, atomic_t *v)                        \
                        : "memory");                                    \
 }
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#define ATOMIC_FETCH_OP(op, c_op)                                      \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                \
+{                                                                      \
+       int old, val = atomic_read(v);                                  \
+       for (;;) {                                                      \
+               old = atomic_cmpxchg(v, val, val c_op i);               \
+               if (old == val)                                         \
+                       break;                                          \
+               val = old;                                              \
+       }                                                               \
+       return old;                                                     \
+}
+
+#define ATOMIC_OPS(op, c_op)                                           \
+       ATOMIC_OP(op)                                                   \
+       ATOMIC_FETCH_OP(op, c_op)
+
+ATOMIC_OPS(and, &)
+ATOMIC_OPS(or , |)
+ATOMIC_OPS(xor, ^)
 
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP
 
 /**
index a984111135b16e9c7c51d2cde681aa3b310a6a6b..71d7705fb303ba5238f4f812fdb071277addbce8 100644 (file)
@@ -320,10 +320,29 @@ static inline void atomic64_##op(long long i, atomic64_t *v)              \
                c = old;                                                \
 }
 
-ATOMIC64_OP(and, &)
-ATOMIC64_OP(or, |)
-ATOMIC64_OP(xor, ^)
+#define ATOMIC64_FETCH_OP(op, c_op)                                    \
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v)        \
+{                                                                      \
+       long long old, c = 0;                                           \
+       while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)           \
+               c = old;                                                \
+       return old;                                                     \
+}
+
+ATOMIC64_FETCH_OP(add, +)
+
+#define atomic64_fetch_sub(i, v)       atomic64_fetch_add(-(i), (v))
+
+#define ATOMIC64_OPS(op, c_op)                                         \
+       ATOMIC64_OP(op, c_op)                                           \
+       ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
index 037351022f5483c99a3f0a42dd4646f1eb0a3055..89ed2f6ae2f76accf15ad16810c6aceb4a1854ae 100644 (file)
@@ -70,9 +70,9 @@ static inline void atomic64_sub(long i, atomic64_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic64_sub_and_test(long i, atomic64_t *v)
+static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", "e");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
 }
 
 /**
@@ -109,9 +109,9 @@ static __always_inline void atomic64_dec(atomic64_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int atomic64_dec_and_test(atomic64_t *v)
+static inline bool atomic64_dec_and_test(atomic64_t *v)
 {
-       GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", "e");
+       GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
 }
 
 /**
@@ -122,9 +122,9 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic64_inc_and_test(atomic64_t *v)
+static inline bool atomic64_inc_and_test(atomic64_t *v)
 {
-       GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", "e");
+       GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
 }
 
 /**
@@ -136,9 +136,9 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline int atomic64_add_negative(long i, atomic64_t *v)
+static inline bool atomic64_add_negative(long i, atomic64_t *v)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", "s");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
 }
 
 /**
@@ -158,6 +158,16 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
        return atomic64_add_return(-i, v);
 }
 
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
+{
+       return xadd(&v->counter, i);
+}
+
+static inline long atomic64_fetch_sub(long i, atomic64_t *v)
+{
+       return xadd(&v->counter, -i);
+}
+
 #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
 #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
@@ -180,7 +190,7 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
 {
        long c, old;
        c = atomic64_read(v);
@@ -229,10 +239,29 @@ static inline void atomic64_##op(long i, atomic64_t *v)                   \
                        : "memory");                                    \
 }
 
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+#define ATOMIC64_FETCH_OP(op, c_op)                                    \
+static inline long atomic64_fetch_##op(long i, atomic64_t *v)          \
+{                                                                      \
+       long old, val = atomic64_read(v);                               \
+       for (;;) {                                                      \
+               old = atomic64_cmpxchg(v, val, val c_op i);             \
+               if (old == val)                                         \
+                       break;                                          \
+               val = old;                                              \
+       }                                                               \
+       return old;                                                     \
+}
+
+#define ATOMIC64_OPS(op, c_op)                                         \
+       ATOMIC64_OP(op)                                                 \
+       ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #endif /* _ASM_X86_ATOMIC64_64_H */
index 2b00c776f223af3e05e35a7c53096e77f724e892..4b7b8e71607eaf8f800770f4dfc3b0ce9d5f675b 100644 (file)
@@ -17,7 +17,7 @@ static inline unsigned int get_bios_ebda(void)
        return address; /* 0 means none */
 }
 
-void reserve_ebda_region(void);
+void reserve_bios_regions(void);
 
 #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
 /*
index 7766d1cf096e80d56562d63876f8ca65df869199..68557f52b9619ddfed7681fc43c2d0530c625f0f 100644 (file)
@@ -201,9 +201,9 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c);
 }
 
 /**
@@ -213,7 +213,7 @@ static __always_inline int test_and_set_bit(long nr, volatile unsigned long *add
  *
  * This is the same as test_and_set_bit on x86.
  */
-static __always_inline int
+static __always_inline bool
 test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 {
        return test_and_set_bit(nr, addr);
@@ -228,13 +228,13 @@ test_and_set_bit_lock(long nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-       int oldbit;
+       bool oldbit;
 
        asm("bts %2,%1\n\t"
-           "sbb %0,%0"
-           : "=r" (oldbit), ADDR
+           CC_SET(c)
+           : CC_OUT(c) (oldbit), ADDR
            : "Ir" (nr));
        return oldbit;
 }
@@ -247,9 +247,9 @@ static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *a
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c);
 }
 
 /**
@@ -268,25 +268,25 @@ static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *a
  * accessed from a hypervisor on the same CPU if running in a VM: don't change
  * this without also updating arch/x86/kernel/kvm.c
  */
-static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-       int oldbit;
+       bool oldbit;
 
        asm volatile("btr %2,%1\n\t"
-                    "sbb %0,%0"
-                    : "=r" (oldbit), ADDR
+                    CC_SET(c)
+                    : CC_OUT(c) (oldbit), ADDR
                     : "Ir" (nr));
        return oldbit;
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-       int oldbit;
+       bool oldbit;
 
        asm volatile("btc %2,%1\n\t"
-                    "sbb %0,%0"
-                    : "=r" (oldbit), ADDR
+                    CC_SET(c)
+                    : CC_OUT(c) (oldbit), ADDR
                     : "Ir" (nr) : "memory");
 
        return oldbit;
@@ -300,24 +300,24 @@ static __always_inline int __test_and_change_bit(long nr, volatile unsigned long
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-       GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
+       GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c);
 }
 
-static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
+static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr)
 {
        return ((1UL << (nr & (BITS_PER_LONG-1))) &
                (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
-static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
+static __always_inline bool variable_test_bit(long nr, volatile const unsigned long *addr)
 {
-       int oldbit;
+       bool oldbit;
 
        asm volatile("bt %2,%1\n\t"
-                    "sbb %0,%0"
-                    : "=r" (oldbit)
+                    CC_SET(c)
+                    : CC_OUT(c) (oldbit)
                     : "m" (*(unsigned long *)addr), "Ir" (nr));
 
        return oldbit;
@@ -329,7 +329,7 @@ static __always_inline int variable_test_bit(long nr, volatile const unsigned lo
  * @nr: bit number to test
  * @addr: Address to start counting from
  */
-static int test_bit(int nr, const volatile unsigned long *addr);
+static bool test_bit(int nr, const volatile unsigned long *addr);
 #endif
 
 #define test_bit(nr, addr)                     \
index 532f85e6651f4e86060fd35add94431d78c20df3..7b53743ed267feefc0be1303ae9f9b97233034b2 100644 (file)
@@ -2,8 +2,7 @@
 #define _ASM_X86_CHECKSUM_32_H
 
 #include <linux/in6.h>
-
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 /*
  * computes the checksum of a memory block at buff, length len,
index 5a3b2c119ed0eb70137bff968607f5d44481f223..a18806165fe49a4bb84b368ea1c62cb775e2b383 100644 (file)
@@ -40,6 +40,7 @@ typedef s32           compat_long_t;
 typedef s64 __attribute__((aligned(4))) compat_s64;
 typedef u32            compat_uint_t;
 typedef u32            compat_ulong_t;
+typedef u32            compat_u32;
 typedef u64 __attribute__((aligned(4))) compat_u64;
 typedef u32            compat_uptr_t;
 
@@ -181,6 +182,16 @@ typedef struct compat_siginfo {
                /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
                struct {
                        unsigned int _addr;     /* faulting insn/memory ref. */
+                       short int _addr_lsb;    /* Valid LSB of the reported address. */
+                       union {
+                               /* used when si_code=SEGV_BNDERR */
+                               struct {
+                                       compat_uptr_t _lower;
+                                       compat_uptr_t _upper;
+                               } _addr_bnd;
+                               /* used when si_code=SEGV_PKUERR */
+                               compat_u32 _pkey;
+                       };
                } _sigfault;
 
                /* SIGPOLL */
index 678637ad7476358a8263deee72a84e476912d2f0..59d34c521d964f294e4ad66ce6900ad16b19d999 100644 (file)
@@ -17,7 +17,6 @@ static inline void prefill_possible_map(void) {}
 
 #define cpu_physical_id(cpu)                   boot_cpu_physical_apicid
 #define safe_smp_processor_id()                        0
-#define stack_smp_processor_id()               0
 
 #endif /* CONFIG_SMP */
 
index 4a413485f9eb8ef58ec71c77ff2594f4300c8ea6..c64b1e9c5d1a30d916be2d944a3e94134b240fb0 100644 (file)
 #define X86_BUG_FXSAVE_LEAK    X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
 #define X86_BUG_CLFLUSH_MONITOR        X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
 #define X86_BUG_SYSRET_SS_ATTRS        X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-#define X86_BUG_NULL_SEG       X86_BUG(9) /* Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE   X86_BUG(10) /* SWAPGS without input dep on GS */
-
-
 #ifdef CONFIG_X86_32
 /*
  * 64-bit kernels don't use X86_BUG_ESPFIX.  Make the define conditional
  */
 #define X86_BUG_ESPFIX         X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
 #endif
+#define X86_BUG_NULL_SEG       X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE   X86_BUG(11) /* SWAPGS without input dep on GS */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index 78d1e7467eae9fb9bd200cb04205c22322b01b4f..d0bb76d81402c192f7b67cab74eb72285ec8bc1d 100644 (file)
@@ -41,10 +41,9 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 /*
  * Wrap all the virtual calls in a way that forces the parameters on the stack.
  */
-#define arch_efi_call_virt(f, args...)                                 \
+#define arch_efi_call_virt(p, f, args...)                              \
 ({                                                                     \
-       ((efi_##f##_t __attribute__((regparm(0)))*)                     \
-               efi.systab->runtime->f)(args);                          \
+       ((efi_##f##_t __attribute__((regparm(0)))*) p->f)(args);        \
 })
 
 #define efi_ioremap(addr, size, type, attr)    ioremap_cache(addr, size)
@@ -81,8 +80,8 @@ struct efi_scratch {
        }                                                               \
 })
 
-#define arch_efi_call_virt(f, args...)                                 \
-       efi_call((void *)efi.systab->runtime->f, args)                  \
+#define arch_efi_call_virt(p, f, args...)                              \
+       efi_call((void *)p->f, args)                                    \
 
 #define arch_efi_call_virt_teardown()                                  \
 ({                                                                     \
@@ -125,7 +124,6 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
 extern void efi_sync_low_kernel_mappings(void);
 extern int __init efi_alloc_page_tables(void);
 extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
-extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
 extern void __init old_map_region(efi_memory_desc_t *md);
 extern void __init runtime_code_page_mkexec(void);
 extern void __init efi_runtime_update_mappings(void);
index 31ac8e6d9f36693a18a9aa46299d4cf3b5c84acd..116b5834750121515ccca232044bba439176aaa2 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/fpu/api.h>
 #include <asm/fpu/xstate.h>
 #include <asm/cpufeature.h>
+#include <asm/trace/fpu.h>
 
 /*
  * High level FPU state handling functions:
@@ -524,6 +525,7 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
 
        fpu->fpregs_active = 0;
        this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+       trace_x86_fpu_regs_deactivated(fpu);
 }
 
 /* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
@@ -533,6 +535,7 @@ static inline void __fpregs_activate(struct fpu *fpu)
 
        fpu->fpregs_active = 1;
        this_cpu_write(fpu_fpregs_owner_ctx, fpu);
+       trace_x86_fpu_regs_activated(fpu);
 }
 
 /*
@@ -604,11 +607,13 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 
                /* But leave fpu_fpregs_owner_ctx! */
                old_fpu->fpregs_active = 0;
+               trace_x86_fpu_regs_deactivated(old_fpu);
 
                /* Don't change CR0.TS if we just switch! */
                if (fpu.preload) {
                        new_fpu->counter++;
                        __fpregs_activate(new_fpu);
+                       trace_x86_fpu_regs_activated(new_fpu);
                        prefetch(&new_fpu->state);
                } else {
                        __fpregs_deactivate_hw();
index 36b90bbfc69fa8eb5fa4daa6d97438730ab8887f..48df486b02f9170e9ba6f9cb2b78f45eb3e53d4b 100644 (file)
@@ -122,6 +122,7 @@ enum xfeature {
 #define XFEATURE_MASK_OPMASK           (1 << XFEATURE_OPMASK)
 #define XFEATURE_MASK_ZMM_Hi256                (1 << XFEATURE_ZMM_Hi256)
 #define XFEATURE_MASK_Hi16_ZMM         (1 << XFEATURE_Hi16_ZMM)
+#define XFEATURE_MASK_PT               (1 << XFEATURE_PT_UNIMPLEMENTED_SO_FAR)
 #define XFEATURE_MASK_PKRU             (1 << XFEATURE_PKRU)
 
 #define XFEATURE_MASK_FPSSE            (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
@@ -230,6 +231,12 @@ struct xstate_header {
        u64                             reserved[6];
 } __attribute__((packed));
 
+/*
+ * xstate_header.xcomp_bv[63] indicates that the extended_state_area
+ * is in compacted format.
+ */
+#define XCOMP_BV_COMPACTED_FORMAT ((u64)1 << 63)
+
 /*
  * This is our most modern FPU state format, as saved by the XSAVE
  * and restored by the XRSTOR instructions.
index 38951b0fcc5a408130b697296d0bfd48ceee83f4..ae55a43e09c0f20846919680f85a1c15538432bc 100644 (file)
@@ -18,6 +18,9 @@
 #define XSAVE_YMM_SIZE     256
 #define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
 
+/* Supervisor features */
+#define XFEATURE_MASK_SUPERVISOR (XFEATURE_MASK_PT)
+
 /* Supported features which support lazy state saving */
 #define XFEATURE_MASK_LAZY     (XFEATURE_MASK_FP | \
                                 XFEATURE_MASK_SSE | \
@@ -39,7 +42,6 @@
 #define REX_PREFIX
 #endif
 
-extern unsigned int xstate_size;
 extern u64 xfeatures_mask;
 extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
 
@@ -48,5 +50,9 @@ extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
 void fpu__xstate_clear_all_cpu_caps(void);
 void *get_xsave_addr(struct xregs_state *xsave, int xstate);
 const void *get_xsave_field_ptr(int xstate_field);
-
+int using_compacted_format(void);
+int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
+                       void __user *ubuf, struct xregs_state *xsave);
+int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
+                    struct xregs_state *xsave);
 #endif
index 7c5af123bdbd21680744000f1a6195644a5140b3..9d6b097aa73dfb4242e13614846bf3948c41066f 100644 (file)
 #define _ASM_X86_INTEL_MID_H
 
 #include <linux/sfi.h>
+#include <linux/pci.h>
 #include <linux/platform_device.h>
 
 extern int intel_mid_pci_init(void);
+extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+
+#define INTEL_MID_PWR_LSS_OFFSET       4
+#define INTEL_MID_PWR_LSS_TYPE         (1 << 7)
+
+extern int intel_mid_pwr_get_lss_id(struct pci_dev *pdev);
+
 extern int get_gpio_by_name(const char *name);
 extern void intel_scu_device_register(struct platform_device *pdev);
 extern int __init sfi_parse_mrtc(struct sfi_table_header *table);
@@ -34,13 +42,28 @@ struct devs_id {
        void *(*get_platform_data)(void *info);
        /* Custom handler for devices */
        void (*device_handler)(struct sfi_device_table_entry *pentry,
-                               struct devs_id *dev);
+                              struct devs_id *dev);
 };
 
-#define sfi_device(i)   \
-       static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \
+#define sfi_device(i)                                                          \
+       static const struct devs_id *const __intel_mid_sfi_##i##_dev __used     \
        __attribute__((__section__(".x86_intel_mid_dev.init"))) = &i
 
+/**
+* struct mid_sd_board_info - template for SD device creation
+* @name:               identifies the driver
+* @bus_num:            board-specific identifier for a given SD controller
+* @max_clk:            the maximum frequency device supports
+* @platform_data:      the particular data stored there is driver-specific
+*/
+struct mid_sd_board_info {
+       char            name[SFI_NAME_LEN];
+       int             bus_num;
+       unsigned short  addr;
+       u32             max_clk;
+       void            *platform_data;
+};
+
 /*
  * Medfield is the follow-up of Moorestown, it combines two chip solution into
  * one. Other than that it also added always-on and constant tsc and lapic
@@ -60,7 +83,7 @@ extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
 /**
  * struct intel_mid_ops - Interface between intel-mid & sub archs
  * @arch_setup: arch_setup function to re-initialize platform
- *             structures (x86_init, x86_platform_init)
+ *             structures (x86_init, x86_platform_init)
  *
  * This structure can be extended if any new interface is required
  * between intel-mid & its sub arch files.
@@ -70,20 +93,20 @@ struct intel_mid_ops {
 };
 
 /* Helper API's for INTEL_MID_OPS_INIT */
-#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid)     \
-                               [cpuid] = get_##cpuname##_ops
+#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid)                             \
+       [cpuid] = get_##cpuname##_ops
 
 /* Maximum number of CPU ops */
-#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
+#define MAX_CPU_OPS(a)                 (sizeof(a)/sizeof(void *))
 
 /*
  * For every new cpu addition, a weak get_<cpuname>_ops() function needs be
  * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
  */
-#define INTEL_MID_OPS_INIT {\
-       DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
-       DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
-       DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
+#define INTEL_MID_OPS_INIT {                                                   \
+       DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL),        \
+       DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW),  \
+       DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER)         \
 };
 
 #ifdef CONFIG_X86_INTEL_MID
@@ -100,8 +123,8 @@ static inline bool intel_mid_has_msic(void)
 
 #else /* !CONFIG_X86_INTEL_MID */
 
-#define intel_mid_identify_cpu()    (0)
-#define intel_mid_has_msic()    (0)
+#define intel_mid_identify_cpu()       0
+#define intel_mid_has_msic()           0
 
 #endif /* !CONFIG_X86_INTEL_MID */
 
@@ -117,35 +140,38 @@ extern enum intel_mid_timer_options intel_mid_timer_options;
  * Penwell uses spread spectrum clock, so the freq number is not exactly
  * the same as reported by MSR based on SDM.
  */
-#define FSB_FREQ_83SKU 83200
-#define FSB_FREQ_100SKU        99840
-#define FSB_FREQ_133SKU        133000
+#define FSB_FREQ_83SKU                 83200
+#define FSB_FREQ_100SKU                        99840
+#define FSB_FREQ_133SKU                        133000
 
-#define FSB_FREQ_167SKU        167000
-#define FSB_FREQ_200SKU        200000
-#define FSB_FREQ_267SKU        267000
-#define FSB_FREQ_333SKU        333000
-#define FSB_FREQ_400SKU        400000
+#define FSB_FREQ_167SKU                        167000
+#define FSB_FREQ_200SKU                        200000
+#define FSB_FREQ_267SKU                        267000
+#define FSB_FREQ_333SKU                        333000
+#define FSB_FREQ_400SKU                        400000
 
 /* Bus Select SoC Fuse value */
-#define BSEL_SOC_FUSE_MASK     0x7
-#define BSEL_SOC_FUSE_001      0x1 /* FSB 133MHz */
-#define BSEL_SOC_FUSE_101      0x5 /* FSB 100MHz */
-#define BSEL_SOC_FUSE_111      0x7 /* FSB 83MHz */
+#define BSEL_SOC_FUSE_MASK             0x7
+/* FSB 133MHz */
+#define BSEL_SOC_FUSE_001              0x1
+/* FSB 100MHz */
+#define BSEL_SOC_FUSE_101              0x5
+/* FSB 83MHz */
+#define BSEL_SOC_FUSE_111              0x7
 
-#define SFI_MTMR_MAX_NUM 8
-#define SFI_MRTC_MAX   8
+#define SFI_MTMR_MAX_NUM               8
+#define SFI_MRTC_MAX                   8
 
 extern void intel_scu_devices_create(void);
 extern void intel_scu_devices_destroy(void);
 
 /* VRTC timer */
-#define MRST_VRTC_MAP_SZ       (1024)
-/*#define MRST_VRTC_PGOFFSET   (0xc00) */
+#define MRST_VRTC_MAP_SZ               1024
+/* #define MRST_VRTC_PGOFFSET          0xc00 */
 
 extern void intel_mid_rtc_init(void);
 
-/* the offset for the mapping of global gpio pin to irq */
-#define INTEL_MID_IRQ_OFFSET 0x100
+/* The offset for the mapping of global gpio pin to irq */
+#define INTEL_MID_IRQ_OFFSET           0x100
 
 #endif /* _ASM_X86_INTEL_MID_H */
diff --git a/arch/x86/include/asm/kaslr.h b/arch/x86/include/asm/kaslr.h
new file mode 100644 (file)
index 0000000..2674ee3
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef _ASM_KASLR_H_
+#define _ASM_KASLR_H_
+
+unsigned long kaslr_get_random_long(const char *purpose);
+
+#ifdef CONFIG_RANDOMIZE_MEMORY
+extern unsigned long page_offset_base;
+extern unsigned long vmalloc_base;
+
+void kernel_randomize_memory(void);
+#else
+static inline void kernel_randomize_memory(void) { }
+#endif /* CONFIG_RANDOMIZE_MEMORY */
+
+#endif
index e5f5dc9787d5fa64beac6bca4a4854c05d03dc4a..1ef9d581b5d9829365160082acbb98d1f549d421 100644 (file)
@@ -26,6 +26,7 @@ extern void die(const char *, struct pt_regs *,long);
 extern int __must_check __die(const char *, struct pt_regs *, long);
 extern void show_trace(struct task_struct *t, struct pt_regs *regs,
                       unsigned long *sp, unsigned long bp);
+extern void show_stack_regs(struct pt_regs *regs);
 extern void __show_regs(struct pt_regs *regs, int all);
 extern unsigned long oops_begin(void);
 extern void oops_end(unsigned long, struct pt_regs *, int signr);
index 4ad6560847b1f5f7c20644b52e27505339880e79..7511978093ebb57d2ddb11d137a623ba4237d483 100644 (file)
@@ -50,9 +50,9 @@ static inline void local_sub(long i, local_t *l)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int local_sub_and_test(long i, local_t *l)
+static inline bool local_sub_and_test(long i, local_t *l)
 {
-       GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", "e");
+       GEN_BINARY_RMWcc(_ASM_SUB, l->a.counter, "er", i, "%0", e);
 }
 
 /**
@@ -63,9 +63,9 @@ static inline int local_sub_and_test(long i, local_t *l)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int local_dec_and_test(local_t *l)
+static inline bool local_dec_and_test(local_t *l)
 {
-       GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", "e");
+       GEN_UNARY_RMWcc(_ASM_DEC, l->a.counter, "%0", e);
 }
 
 /**
@@ -76,9 +76,9 @@ static inline int local_dec_and_test(local_t *l)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline int local_inc_and_test(local_t *l)
+static inline bool local_inc_and_test(local_t *l)
 {
-       GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", "e");
+       GEN_UNARY_RMWcc(_ASM_INC, l->a.counter, "%0", e);
 }
 
 /**
@@ -90,9 +90,9 @@ static inline int local_inc_and_test(local_t *l)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline int local_add_negative(long i, local_t *l)
+static inline bool local_add_negative(long i, local_t *l)
 {
-       GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", "s");
+       GEN_BINARY_RMWcc(_ASM_ADD, l->a.counter, "er", i, "%0", s);
 }
 
 /**
index 85e6cda45a0297e20204fe926757e46b647faa2b..e9355a84fc675b936c624bcd00d5640ed43f64b8 100644 (file)
@@ -101,7 +101,7 @@ static inline int __mutex_fastpath_trylock(atomic_t *count,
                                           int (*fail_fn)(atomic_t *))
 {
        /* cmpxchg because it never induces a false contention state. */
-       if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+       if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
                return 1;
 
        return 0;
index 07537a44216ec9b2eed302183af7c57d8949a5a0..d9850758464eee05d898eb7c10b5b99cf13c3669 100644 (file)
@@ -118,10 +118,10 @@ do {                                                              \
 static inline int __mutex_fastpath_trylock(atomic_t *count,
                                           int (*fail_fn)(atomic_t *))
 {
-       if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+       if (likely(atomic_read(count) == 1 && atomic_cmpxchg(count, 1, 0) == 1))
                return 1;
-       else
-               return 0;
+
+       return 0;
 }
 
 #endif /* _ASM_X86_MUTEX_64_H */
index d5c2f8b40faabf3f52386e7d14199c362bf2373a..9215e05276478f851a01c65e4d0750db4eae0a0b 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef _ASM_X86_PAGE_64_DEFS_H
 #define _ASM_X86_PAGE_64_DEFS_H
 
+#ifndef __ASSEMBLY__
+#include <asm/kaslr.h>
+#endif
+
 #ifdef CONFIG_KASAN
 #define KASAN_STACK_ORDER 1
 #else
  * hypervisor to fit.  Choosing 16 slots here is arbitrary, but it's
  * what Xen requires.
  */
-#define __PAGE_OFFSET           _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE      _AC(0xffff880000000000, UL)
+#ifdef CONFIG_RANDOMIZE_MEMORY
+#define __PAGE_OFFSET           page_offset_base
+#else
+#define __PAGE_OFFSET           __PAGE_OFFSET_BASE
+#endif /* CONFIG_RANDOMIZE_MEMORY */
 
 #define __START_KERNEL_map     _AC(0xffffffff80000000, UL)
 
index e0ba66ca68c6fa6d4196204ac74cc9dc8bb00250..e02e3f80d363bbb87d8db6c74b2a0d5370818419 100644 (file)
@@ -510,14 +510,15 @@ do {                                                                      \
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
 #define x86_test_and_clear_bit_percpu(bit, var)                                \
 ({                                                                     \
-       int old__;                                                      \
-       asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"           \
-                    : "=r" (old__), "+m" (var)                         \
+       bool old__;                                                     \
+       asm volatile("btr %2,"__percpu_arg(1)"\n\t"                     \
+                    CC_SET(c)                                          \
+                    : CC_OUT(c) (old__), "+m" (var)                    \
                     : "dIr" (bit));                                    \
        old__;                                                          \
 })
 
-static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
+static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
                         const unsigned long __percpu *addr)
 {
        unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
@@ -529,14 +530,14 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
 #endif
 }
 
-static inline int x86_this_cpu_variable_test_bit(int nr,
+static inline bool x86_this_cpu_variable_test_bit(int nr,
                         const unsigned long __percpu *addr)
 {
-       int oldbit;
+       bool oldbit;
 
        asm volatile("bt "__percpu_arg(2)",%1\n\t"
-                       "sbb %0,%0"
-                       : "=r" (oldbit)
+                       CC_SET(c)
+                       : CC_OUT(c) (oldbit)
                        : "m" (*(unsigned long *)addr), "Ir" (nr));
 
        return oldbit;
index 1a27396b6ea04df12b2cff8930341320e7a4a819..437feb436efa666dbe13732c7d4269160fed49e3 100644 (file)
@@ -480,7 +480,7 @@ pte_t *populate_extra_pte(unsigned long vaddr);
 
 static inline int pte_none(pte_t pte)
 {
-       return !pte.pte;
+       return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
 }
 
 #define __HAVE_ARCH_PTE_SAME
@@ -552,7 +552,8 @@ static inline int pmd_none(pmd_t pmd)
 {
        /* Only check low word on 32-bit platforms, since it might be
           out of sync with upper half. */
-       return (unsigned long)native_pmd_val(pmd) == 0;
+       unsigned long val = native_pmd_val(pmd);
+       return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
 }
 
 static inline unsigned long pmd_page_vaddr(pmd_t pmd)
@@ -616,7 +617,7 @@ static inline unsigned long pages_to_mb(unsigned long npg)
 #if CONFIG_PGTABLE_LEVELS > 2
 static inline int pud_none(pud_t pud)
 {
-       return native_pud_val(pud) == 0;
+       return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
 }
 
 static inline int pud_present(pud_t pud)
@@ -694,6 +695,12 @@ static inline int pgd_bad(pgd_t pgd)
 
 static inline int pgd_none(pgd_t pgd)
 {
+       /*
+        * There is no need to do a workaround for the KNL stray
+        * A/D bit erratum here.  PGDs only point to page tables
+        * except on 32-bit non-PAE which is not supported on
+        * KNL.
+        */
        return !native_pgd_val(pgd);
 }
 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
@@ -729,6 +736,23 @@ extern int direct_gbpages;
 void init_mem_mapping(void);
 void early_alloc_pgt_buf(void);
 
+#ifdef CONFIG_X86_64
+/* Realmode trampoline initialization. */
+extern pgd_t trampoline_pgd_entry;
+static inline void __meminit init_trampoline_default(void)
+{
+       /* Default trampoline pgd value */
+       trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
+}
+# ifdef CONFIG_RANDOMIZE_MEMORY
+void __meminit init_trampoline(void);
+# else
+#  define init_trampoline init_trampoline_default
+# endif
+#else
+static inline void init_trampoline(void) { }
+#endif
+
 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 {
index 2ee781114d3480fa1f1136f0e434b7862a361886..7e8ec7ae10faff67a1c6fd26314ed53be3ed598f 100644 (file)
@@ -140,18 +140,32 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
 #define pte_unmap(pte) ((void)(pte))/* NOP */
 
-/* Encode and de-code a swap entry */
+/*
+ * Encode and de-code a swap entry
+ *
+ * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2|1|0| <- bit number
+ * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
+ * | OFFSET (14->63) | TYPE (10-13) |0|X|X|X| X| X|X|X|0| <- swp entry
+ *
+ * G (8) is aliased and used as a PROT_NONE indicator for
+ * !present ptes.  We need to start storing swap entries above
+ * there.  We also need to avoid using A and D because of an
+ * erratum where they can be incorrectly set by hardware on
+ * non-present PTEs.
+ */
+#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
 #define SWP_TYPE_BITS 5
-#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
+/* Place the offset above the type: */
+#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS + 1)
 
 #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
 
-#define __swp_type(x)                  (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
+#define __swp_type(x)                  (((x).val >> (SWP_TYPE_FIRST_BIT)) \
                                         & ((1U << SWP_TYPE_BITS) - 1))
-#define __swp_offset(x)                        ((x).val >> SWP_OFFSET_SHIFT)
+#define __swp_offset(x)                        ((x).val >> SWP_OFFSET_FIRST_BIT)
 #define __swp_entry(type, offset)      ((swp_entry_t) { \
-                                        ((type) << (_PAGE_BIT_PRESENT + 1)) \
-                                        | ((offset) << SWP_OFFSET_SHIFT) })
+                                        ((type) << (SWP_TYPE_FIRST_BIT)) \
+                                        | ((offset) << SWP_OFFSET_FIRST_BIT) })
 #define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val((pte)) })
 #define __swp_entry_to_pte(x)          ((pte_t) { .pte = (x).val })
 
index e6844dfb4471d643a88fa2ba7bb88f28a27bb5a2..6fdef9eef2d51d58aad4ff2f2ef9248585093d73 100644 (file)
@@ -5,6 +5,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
+#include <asm/kaslr.h>
 
 /*
  * These are used to make use of C type-checking..
@@ -53,10 +54,16 @@ typedef struct { pteval_t pte; } pte_t;
 #define PGDIR_MASK     (~(PGDIR_SIZE - 1))
 
 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
-#define MAXMEM          _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
-#define VMALLOC_START    _AC(0xffffc90000000000, UL)
-#define VMALLOC_END      _AC(0xffffe8ffffffffff, UL)
-#define VMEMMAP_START   _AC(0xffffea0000000000, UL)
+#define MAXMEM         _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+#define VMALLOC_SIZE_TB        _AC(32, UL)
+#define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
+#define VMEMMAP_START  _AC(0xffffea0000000000, UL)
+#ifdef CONFIG_RANDOMIZE_MEMORY
+#define VMALLOC_START  vmalloc_base
+#else
+#define VMALLOC_START  __VMALLOC_BASE
+#endif /* CONFIG_RANDOMIZE_MEMORY */
+#define VMALLOC_END    (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
 #define MODULES_VADDR    (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
 #define MODULES_END      _AC(0xffffffffff000000, UL)
 #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
index 7b5efe264eff253d1bbb9ea69fbca9caadf44389..f1218f512f62dda18942a1776f347c1101abed4c 100644 (file)
                         _PAGE_PKEY_BIT2 | \
                         _PAGE_PKEY_BIT3)
 
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+#define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
+#else
+#define _PAGE_KNL_ERRATUM_MASK 0
+#endif
+
 #ifdef CONFIG_KMEMCHECK
 #define _PAGE_HIDDEN   (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
 #else
@@ -475,8 +481,6 @@ extern pmd_t *lookup_pmd_address(unsigned long address);
 extern phys_addr_t slow_virt_to_phys(void *__address);
 extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
                                   unsigned numpages, unsigned long page_flags);
-void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
-                              unsigned numpages);
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_X86_PGTABLE_DEFS_H */
index d397deb5814677a64c6ab8debf5ea1f403dfd898..17f2186457012eb77fcb2f6ca2183d1b0a062384 100644 (file)
@@ -81,7 +81,7 @@ static __always_inline void __preempt_count_sub(int val)
  */
 static __always_inline bool __preempt_count_dec_and_test(void)
 {
-       GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
+       GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e);
 }
 
 /*
index 62c6cc3cc5d32f5490b56e4b6590fbdf74af76d9..63def9537a2d249f5814cf73fac20f7d8873e232 100644 (file)
@@ -367,10 +367,15 @@ DECLARE_PER_CPU(struct irq_stack *, hardirq_stack);
 DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
 #endif /* X86_64 */
 
-extern unsigned int xstate_size;
+extern unsigned int fpu_kernel_xstate_size;
+extern unsigned int fpu_user_xstate_size;
 
 struct perf_event;
 
+typedef struct {
+       unsigned long           seg;
+} mm_segment_t;
+
 struct thread_struct {
        /* Cached TLS descriptors: */
        struct desc_struct      tls_array[GDT_ENTRY_TLS_ENTRIES];
@@ -419,6 +424,11 @@ struct thread_struct {
        /* Max allowed port in the bitmap, in bytes: */
        unsigned                io_bitmap_max;
 
+       mm_segment_t            addr_limit;
+
+       unsigned int            sig_on_uaccess_err:1;
+       unsigned int            uaccess_err:1;  /* uaccess failed */
+
        /* Floating point and extended processor state */
        struct fpu              fpu;
        /*
@@ -490,11 +500,6 @@ static inline void load_sp0(struct tss_struct *tss,
 #define set_iopl_mask native_set_iopl_mask
 #endif /* CONFIG_PARAVIRT */
 
-typedef struct {
-       unsigned long           seg;
-} mm_segment_t;
-
-
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
 
@@ -716,6 +721,7 @@ static inline void spin_lock_prefetch(const void *x)
        .sp0                    = TOP_OF_INIT_STACK,                      \
        .sysenter_cs            = __KERNEL_CS,                            \
        .io_bitmap_ptr          = NULL,                                   \
+       .addr_limit             = KERNEL_DS,                              \
 }
 
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
@@ -765,8 +771,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 #define STACK_TOP              TASK_SIZE
 #define STACK_TOP_MAX          TASK_SIZE_MAX
 
-#define INIT_THREAD  { \
-       .sp0 = TOP_OF_INIT_STACK \
+#define INIT_THREAD  {                                         \
+       .sp0                    = TOP_OF_INIT_STACK,            \
+       .addr_limit             = KERNEL_DS,                    \
 }
 
 /*
index 8f7866a5b9a41df67475892353979c005ce8c09b..661dd305694af8878b8cdcd8b9396b9c7da3b646 100644 (file)
@@ -1,11 +1,13 @@
 #ifndef _ASM_X86_RMWcc
 #define _ASM_X86_RMWcc
 
-#ifdef CC_HAVE_ASM_GOTO
+#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CC_HAVE_ASM_GOTO)
+
+/* Use asm goto */
 
 #define __GEN_RMWcc(fullop, var, cc, ...)                              \
 do {                                                                   \
-       asm_volatile_goto (fullop "; j" cc " %l[cc_label]"              \
+       asm_volatile_goto (fullop "; j" #cc " %l[cc_label]"             \
                        : : "m" (var), ## __VA_ARGS__                   \
                        : "memory" : cc_label);                         \
        return 0;                                                       \
@@ -19,15 +21,17 @@ cc_label:                                                           \
 #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                 \
        __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
 
-#else /* !CC_HAVE_ASM_GOTO */
+#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
+
+/* Use flags output or a set instruction */
 
 #define __GEN_RMWcc(fullop, var, cc, ...)                              \
 do {                                                                   \
-       char c;                                                         \
-       asm volatile (fullop "; set" cc " %1"                           \
-                       : "+m" (var), "=qm" (c)                         \
+       bool c;                                                         \
+       asm volatile (fullop ";" CC_SET(cc)                             \
+                       : "+m" (var), CC_OUT(cc) (c)                    \
                        : __VA_ARGS__ : "memory");                      \
-       return c != 0;                                                  \
+       return c;                                                       \
 } while (0)
 
 #define GEN_UNARY_RMWcc(op, var, arg0, cc)                             \
@@ -36,6 +40,6 @@ do {                                                                  \
 #define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                 \
        __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
 
-#endif /* CC_HAVE_ASM_GOTO */
+#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CC_HAVE_ASM_GOTO) */
 
 #endif /* _ASM_X86_RMWcc */
index 453744c1d34752c20988cf513ff1eef75c3fa657..8dbc762ad132940bb6354ca7cfcfd4c69fd66f3a 100644 (file)
@@ -77,7 +77,7 @@ static inline void __down_read(struct rw_semaphore *sem)
 /*
  * trylock for reading -- returns 1 if successful, 0 if contention
  */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline bool __down_read_trylock(struct rw_semaphore *sem)
 {
        long result, tmp;
        asm volatile("# beginning __down_read_trylock\n\t"
@@ -93,7 +93,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
                     : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
                     : "i" (RWSEM_ACTIVE_READ_BIAS)
                     : "memory", "cc");
-       return result >= 0 ? 1 : 0;
+       return result >= 0;
 }
 
 /*
@@ -134,9 +134,10 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline bool __down_write_trylock(struct rw_semaphore *sem)
 {
-       long result, tmp;
+       bool result;
+       long tmp0, tmp1;
        asm volatile("# beginning __down_write_trylock\n\t"
                     "  mov          %0,%1\n\t"
                     "1:\n\t"
@@ -144,14 +145,14 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
                     /* was the active mask 0 before? */
                     "  jnz          2f\n\t"
                     "  mov          %1,%2\n\t"
-                    "  add          %3,%2\n\t"
+                    "  add          %4,%2\n\t"
                     LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
                     "  jnz          1b\n\t"
                     "2:\n\t"
-                    "  sete         %b1\n\t"
-                    "  movzbl       %b1, %k1\n\t"
+                    CC_SET(e)
                     "# ending __down_write_trylock\n\t"
-                    : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
+                    : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
+                      CC_OUT(e) (result)
                     : "er" (RWSEM_ACTIVE_WRITE_BIAS)
                     : "memory", "cc");
        return result;
@@ -213,23 +214,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                     : "memory", "cc");
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-       asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
-                    : "+m" (sem->count)
-                    : "er" (delta));
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-       return delta + xadd(&sem->count, delta);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_RWSEM_H */
index 2138c9ae19eeb9ffdede173ecbe144a74ab38a7e..dd1e7d6387ab138e196e2271d1014ca58da8dccf 100644 (file)
@@ -81,9 +81,9 @@ static inline int __const_sigismember(sigset_t *set, int _sig)
 
 static inline int __gen_sigismember(sigset_t *set, int _sig)
 {
-       int ret;
-       asm("btl %2,%1\n\tsbbl %0,%0"
-           : "=r"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
+       unsigned char ret;
+       asm("btl %2,%1\n\tsetc %0"
+           : "=qm"(ret) : "m"(*set), "Ir"(_sig-1) : "cc");
        return ret;
 }
 
index 66b057306f404718c233c18c5603ec30e9e535ba..0576b6157f3ae302818aa67f2feb41c47c601e0a 100644 (file)
@@ -172,12 +172,6 @@ extern int safe_smp_processor_id(void);
 #elif defined(CONFIG_X86_64_SMP)
 #define raw_smp_processor_id() (this_cpu_read(cpu_number))
 
-#define stack_smp_processor_id()                                       \
-({                                                             \
-       struct thread_info *ti;                                         \
-       __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK));      \
-       ti->cpu;                                                        \
-})
 #define safe_smp_processor_id()                smp_processor_id()
 
 #endif
index f28a24b51dc7c17a7fc5f370fdf8340aa370becb..cbf8847d02a0793268819053ea2293ee198c6a00 100644 (file)
@@ -79,10 +79,10 @@ static inline void sync_change_bit(long nr, volatile unsigned long *addr)
  */
 static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-       int oldbit;
+       unsigned char oldbit;
 
-       asm volatile("lock; bts %2,%1\n\tsbbl %0,%0"
-                    : "=r" (oldbit), "+m" (ADDR)
+       asm volatile("lock; bts %2,%1\n\tsetc %0"
+                    : "=qm" (oldbit), "+m" (ADDR)
                     : "Ir" (nr) : "memory");
        return oldbit;
 }
@@ -97,10 +97,10 @@ static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-       int oldbit;
+       unsigned char oldbit;
 
-       asm volatile("lock; btr %2,%1\n\tsbbl %0,%0"
-                    : "=r" (oldbit), "+m" (ADDR)
+       asm volatile("lock; btr %2,%1\n\tsetc %0"
+                    : "=qm" (oldbit), "+m" (ADDR)
                     : "Ir" (nr) : "memory");
        return oldbit;
 }
@@ -115,10 +115,10 @@ static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-       int oldbit;
+       unsigned char oldbit;
 
-       asm volatile("lock; btc %2,%1\n\tsbbl %0,%0"
-                    : "=r" (oldbit), "+m" (ADDR)
+       asm volatile("lock; btc %2,%1\n\tsetc %0"
+                    : "=qm" (oldbit), "+m" (ADDR)
                     : "Ir" (nr) : "memory");
        return oldbit;
 }
index 30c133ac05cd86d6ddba68c8993160296cafde04..89bff044a6f543032a5b77b5ae99f9a9455084dd 100644 (file)
@@ -57,9 +57,6 @@ struct thread_info {
        __u32                   flags;          /* low level flags */
        __u32                   status;         /* thread synchronous flags */
        __u32                   cpu;            /* current CPU */
-       mm_segment_t            addr_limit;
-       unsigned int            sig_on_uaccess_error:1;
-       unsigned int            uaccess_err:1;  /* uaccess failed */
 };
 
 #define INIT_THREAD_INFO(tsk)                  \
@@ -67,7 +64,6 @@ struct thread_info {
        .task           = &tsk,                 \
        .flags          = 0,                    \
        .cpu            = 0,                    \
-       .addr_limit     = KERNEL_DS,            \
 }
 
 #define init_thread_info       (init_thread_union.thread_info)
@@ -186,11 +182,6 @@ static inline unsigned long current_stack_pointer(void)
 # define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
 #endif
 
-/* Load thread_info address into "reg" */
-#define GET_THREAD_INFO(reg) \
-       _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
-       _ASM_SUB $(THREAD_SIZE),reg ;
-
 /*
  * ASM operand which evaluates to a 'thread_info' address of
  * the current task, if it is known that "reg" is exactly "off"
index 7f991bd5031b24947e0773265023ab70b934a7b8..43e87a3dd95c4f4fa45f2300b93891351235b3f3 100644 (file)
 #ifndef _ASM_X86_TOPOLOGY_H
 #define _ASM_X86_TOPOLOGY_H
 
-#ifdef CONFIG_X86_32
-# ifdef CONFIG_SMP
-#  define ENABLE_TOPO_DEFINES
-# endif
-#else
-# ifdef CONFIG_SMP
-#  define ENABLE_TOPO_DEFINES
-# endif
-#endif
-
 /*
  * to preserve the visibility of NUMA_NO_NODE definition,
  * moved to there from here.  May be used independent of
@@ -123,12 +113,20 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #define topology_physical_package_id(cpu)      (cpu_data(cpu).phys_proc_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).cpu_core_id)
 
-#ifdef ENABLE_TOPO_DEFINES
+#ifdef CONFIG_SMP
 #define topology_core_cpumask(cpu)             (per_cpu(cpu_core_map, cpu))
 #define topology_sibling_cpumask(cpu)          (per_cpu(cpu_sibling_map, cpu))
 
 extern unsigned int __max_logical_packages;
 #define topology_max_packages()                        (__max_logical_packages)
+
+extern int __max_smt_threads;
+
+static inline int topology_max_smt_threads(void)
+{
+       return __max_smt_threads;
+}
+
 int topology_update_package_map(unsigned int apicid, unsigned int cpu);
 extern int topology_phys_to_logical_pkg(unsigned int pkg);
 #else
@@ -136,6 +134,7 @@ extern int topology_phys_to_logical_pkg(unsigned int pkg);
 static inline int
 topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
 static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
+static inline int topology_max_smt_threads(void) { return 1; }
 #endif
 
 static inline void arch_fix_phys_package_id(int num, u32 slot)
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
new file mode 100644 (file)
index 0000000..9217ab1
--- /dev/null
@@ -0,0 +1,119 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM x86_fpu
+
+#if !defined(_TRACE_FPU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FPU_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(x86_fpu,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu),
+
+       TP_STRUCT__entry(
+               __field(struct fpu *, fpu)
+               __field(bool, fpregs_active)
+               __field(bool, fpstate_active)
+               __field(int, counter)
+               __field(u64, xfeatures)
+               __field(u64, xcomp_bv)
+               ),
+
+       TP_fast_assign(
+               __entry->fpu            = fpu;
+               __entry->fpregs_active  = fpu->fpregs_active;
+               __entry->fpstate_active = fpu->fpstate_active;
+               __entry->counter        = fpu->counter;
+               if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+                       __entry->xfeatures = fpu->state.xsave.header.xfeatures;
+                       __entry->xcomp_bv  = fpu->state.xsave.header.xcomp_bv;
+               }
+       ),
+       TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+                       __entry->fpu,
+                       __entry->fpregs_active,
+                       __entry->fpstate_active,
+                       __entry->counter,
+                       __entry->xfeatures,
+                       __entry->xcomp_bv
+       )
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_state,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_before_save,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_after_save,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_before_restore,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_after_restore,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_regs_activated,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_regs_deactivated,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_activate_state,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_deactivate_state,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_init_state,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_dropped,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_copy_src,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_copy_dst,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+DEFINE_EVENT(x86_fpu, x86_fpu_xstate_check_failed,
+       TP_PROTO(struct fpu *fpu),
+       TP_ARGS(fpu)
+);
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH asm/trace/
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE fpu
+#endif /* _TRACE_FPU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index 7428697c5b8df1d6856ca63fdda62cb047083f28..33b6365c22fed29b7081f637fd96b0b78a5c2162 100644 (file)
@@ -35,7 +35,7 @@ extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
 extern int check_tsc_unstable(void);
-extern int check_tsc_disabled(void);
+extern unsigned long native_calibrate_cpu(void);
 extern unsigned long native_calibrate_tsc(void);
 extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
 
@@ -52,7 +52,6 @@ extern int notsc_setup(char *);
 extern void tsc_save_sched_clock_state(void);
 extern void tsc_restore_sched_clock_state(void);
 
-/* MSR based TSC calibration for Intel Atom SoC platforms */
-unsigned long try_msr_calibrate_tsc(void);
+unsigned long cpu_khz_from_msr(void);
 
 #endif /* _ASM_X86_TSC_H */
index 2982387ba8171e1cd5a7ba2c4b9a800059a50791..c03bfb68c50352df52d6ae4e36fa54bdc22dbc50 100644 (file)
 #define USER_DS        MAKE_MM_SEG(TASK_SIZE_MAX)
 
 #define get_ds()       (KERNEL_DS)
-#define get_fs()       (current_thread_info()->addr_limit)
-#define set_fs(x)      (current_thread_info()->addr_limit = (x))
+#define get_fs()       (current->thread.addr_limit)
+#define set_fs(x)      (current->thread.addr_limit = (x))
 
 #define segment_eq(a, b)       ((a).seg == (b).seg)
 
-#define user_addr_max() (current_thread_info()->addr_limit.seg)
+#define user_addr_max() (current->thread.addr_limit.seg)
 #define __addr_ok(addr)        \
        ((unsigned long __force)(addr) < user_addr_max())
 
@@ -342,7 +342,26 @@ do {                                                                       \
 } while (0)
 
 #ifdef CONFIG_X86_32
-#define __get_user_asm_u64(x, ptr, retval, errret)     (x) = __get_user_bad()
+#define __get_user_asm_u64(x, ptr, retval, errret)                     \
+({                                                                     \
+       __typeof__(ptr) __ptr = (ptr);                                  \
+       asm volatile(ASM_STAC "\n"                                      \
+                    "1:        movl %2,%%eax\n"                        \
+                    "2:        movl %3,%%edx\n"                        \
+                    "3: " ASM_CLAC "\n"                                \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "4:        mov %4,%0\n"                            \
+                    "  xorl %%eax,%%eax\n"                             \
+                    "  xorl %%edx,%%edx\n"                             \
+                    "  jmp 3b\n"                                       \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 4b)                               \
+                    _ASM_EXTABLE(2b, 4b)                               \
+                    : "=r" (retval), "=A"(x)                           \
+                    : "m" (__m(__ptr)), "m" __m(((u32 *)(__ptr)) + 1), \
+                      "i" (errret), "0" (retval));                     \
+})
+
 #define __get_user_asm_ex_u64(x, ptr)                  (x) = __get_user_bad()
 #else
 #define __get_user_asm_u64(x, ptr, retval, errret) \
@@ -429,7 +448,7 @@ do {                                                                        \
 #define __get_user_nocheck(x, ptr, size)                               \
 ({                                                                     \
        int __gu_err;                                                   \
-       unsigned long __gu_val;                                         \
+       __inttype(*(ptr)) __gu_val;                                     \
        __uaccess_begin();                                              \
        __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);    \
        __uaccess_end();                                                \
@@ -468,13 +487,13 @@ struct __large_struct { unsigned long buf[100]; };
  * uaccess_try and catch
  */
 #define uaccess_try    do {                                            \
-       current_thread_info()->uaccess_err = 0;                         \
+       current->thread.uaccess_err = 0;                                \
        __uaccess_begin();                                              \
        barrier();
 
 #define uaccess_catch(err)                                             \
        __uaccess_end();                                                \
-       (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0);    \
+       (err) |= (current->thread.uaccess_err ? -EFAULT : 0);           \
 } while (0)
 
 /**
index 2b19caa4081c7f49c8841d1d86d6571efe47a12d..32712a925f26c63f1171b05519be5134867a2c64 100644 (file)
@@ -26,6 +26,8 @@
 #  define __ARCH_WANT_COMPAT_SYS_GETDENTS64
 #  define __ARCH_WANT_COMPAT_SYS_PREADV64
 #  define __ARCH_WANT_COMPAT_SYS_PWRITEV64
+#  define __ARCH_WANT_COMPAT_SYS_PREADV64V2
+#  define __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
 
 # endif
 
index 4dcdf74dfed8606ebd0a948bd733a4d721c0d1b8..6ba79317844138672dfe7de27254425e20630b5b 100644 (file)
@@ -168,20 +168,22 @@ struct x86_legacy_devices {
  * struct x86_legacy_features - legacy x86 features
  *
  * @rtc: this device has a CMOS real-time clock present
- * @ebda_search: it's safe to search for the EBDA signature in the hardware's
- *     low RAM
+ * @reserve_bios_regions: boot code will search for the EBDA address and the
+ *     start of the 640k - 1M BIOS region.  If false, the platform must
+ *     ensure that its memory map correctly reserves sub-1MB regions as needed.
  * @devices: legacy x86 devices, refer to struct x86_legacy_devices
  *     documentation for further details.
  */
 struct x86_legacy_features {
        int rtc;
-       int ebda_search;
+       int reserve_bios_regions;
        struct x86_legacy_devices devices;
 };
 
 /**
  * struct x86_platform_ops - platform specific runtime functions
- * @calibrate_tsc:             calibrate TSC
+ * @calibrate_cpu:             calibrate CPU
+ * @calibrate_tsc:             calibrate TSC, if different from CPU
  * @get_wallclock:             get time from HW clock like RTC etc.
  * @set_wallclock:             set time back to HW clock
  * @is_untracked_pat_range     exclude from PAT logic
@@ -200,6 +202,7 @@ struct x86_legacy_features {
  *                             semantics.
  */
 struct x86_platform_ops {
+       unsigned long (*calibrate_cpu)(void);
        unsigned long (*calibrate_tsc)(void);
        void (*get_wallclock)(struct timespec *ts);
        int (*set_wallclock)(const struct timespec *ts);
index a147e676fc7b3439d156534472d63be5c2d10f4a..e991d5c8bb3a1b1d21c08f198a6d38c97f45effb 100644 (file)
@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
        while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
                i++;
 
-       if (i == 0)
-               return 0;
+       if (!i)
+               return -ENODEV;
 
        nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
index 60078a67d7e36064e667abfd679ed569db3a8173..f943d2f453a4eb2fda5954b5115d26f7dda64d0b 100644 (file)
@@ -2045,7 +2045,7 @@ int generic_processor_info(int apicid, int version)
                int thiscpu = max + disabled_cpus - 1;
 
                pr_warning(
-                       "ACPI: NR_CPUS/possible_cpus limit of %i almost"
+                       "APIC: NR_CPUS/possible_cpus limit of %i almost"
                        " reached. Keeping one slot for boot cpu."
                        "  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
 
@@ -2057,7 +2057,7 @@ int generic_processor_info(int apicid, int version)
                int thiscpu = max + disabled_cpus;
 
                pr_warning(
-                       "ACPI: NR_CPUS/possible_cpus limit of %i reached."
+                       "APIC: NR_CPUS/possible_cpus limit of %i reached."
                        "  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
 
                disabled_cpus++;
@@ -2085,7 +2085,7 @@ int generic_processor_info(int apicid, int version)
        if (topology_update_package_map(apicid, cpu) < 0) {
                int thiscpu = max + disabled_cpus;
 
-               pr_warning("ACPI: Package limit reached. Processor %d/0x%x ignored.\n",
+               pr_warning("APIC: Package limit reached. Processor %d/0x%x ignored.\n",
                           thiscpu, apicid);
                disabled_cpus++;
                return -ENOSPC;
index 76f89e2b245afbb1a22550301e39479a8959b04d..048747778d37515651ce7d578759f582de447961 100644 (file)
@@ -181,7 +181,6 @@ static struct apic apic_flat =  {
 
        .get_apic_id                    = flat_get_apic_id,
        .set_apic_id                    = set_apic_id,
-       .apic_id_mask                   = 0xFFu << 24,
 
        .cpu_mask_to_apicid_and         = flat_cpu_mask_to_apicid_and,
 
@@ -278,7 +277,6 @@ static struct apic apic_physflat =  {
 
        .get_apic_id                    = flat_get_apic_id,
        .set_apic_id                    = set_apic_id,
-       .apic_id_mask                   = 0xFFu << 24,
 
        .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,
 
index 13d19ed585142eae225625bd808c1c1acba0b790..2cebf59092d873b59081a381c80d6a3e0a0f1abd 100644 (file)
@@ -141,7 +141,6 @@ struct apic apic_noop = {
 
        .get_apic_id                    = noop_get_apic_id,
        .set_apic_id                    = NULL,
-       .apic_id_mask                   = 0x0F << 24,
 
        .cpu_mask_to_apicid_and         = flat_cpu_mask_to_apicid_and,
 
index ab5c2c685a3ce908c3677262a35fd402ba77a445..714d4fda0d5205a687c077f96e8519ab4d81db2e 100644 (file)
@@ -269,7 +269,6 @@ static const struct apic apic_numachip1 __refconst = {
 
        .get_apic_id                    = numachip1_get_apic_id,
        .set_apic_id                    = numachip1_set_apic_id,
-       .apic_id_mask                   = 0xffU << 24,
 
        .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,
 
@@ -321,7 +320,6 @@ static const struct apic apic_numachip2 __refconst = {
 
        .get_apic_id                    = numachip2_get_apic_id,
        .set_apic_id                    = numachip2_set_apic_id,
-       .apic_id_mask                   = 0xffU << 24,
 
        .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,
 
index cf9bd896c12d38f8ac5772a2d578fae1b68da3df..06dbaa458bfef4bfd052c5b4e7d72b59ce0410d8 100644 (file)
@@ -171,7 +171,6 @@ static struct apic apic_bigsmp = {
 
        .get_apic_id                    = bigsmp_get_apic_id,
        .set_apic_id                    = NULL,
-       .apic_id_mask                   = 0xFF << 24,
 
        .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,
 
index 7c4f90dd4c2ad3ffb441faaba224212e1b10d09c..f072b95726346e68e5a195ae95cf7d503c4267f7 100644 (file)
@@ -2568,29 +2568,25 @@ static struct resource * __init ioapic_setup_resources(void)
        unsigned long n;
        struct resource *res;
        char *mem;
-       int i, num = 0;
+       int i;
 
-       for_each_ioapic(i)
-               num++;
-       if (num == 0)
+       if (nr_ioapics == 0)
                return NULL;
 
        n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
-       n *= num;
+       n *= nr_ioapics;
 
        mem = alloc_bootmem(n);
        res = (void *)mem;
 
-       mem += sizeof(struct resource) * num;
+       mem += sizeof(struct resource) * nr_ioapics;
 
-       num = 0;
        for_each_ioapic(i) {
-               res[num].name = mem;
-               res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+               res[i].name = mem;
+               res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
                mem += IOAPIC_RESOURCE_NAME_SIZE;
-               ioapics[i].iomem_res = &res[num];
-               num++;
+               ioapics[i].iomem_res = &res[i];
        }
 
        ioapic_resources = res;
index f316e34abb42bef31fbf8948bb1cdd8c4491fcbe..93edfa01b408710a6d3d10b3f4a9c0dc111c8280 100644 (file)
@@ -101,7 +101,6 @@ static struct apic apic_default = {
 
        .get_apic_id                    = default_get_apic_id,
        .set_apic_id                    = NULL,
-       .apic_id_mask                   = 0x0F << 24,
 
        .cpu_mask_to_apicid_and         = flat_cpu_mask_to_apicid_and,
 
index aca8b75c15527fcb0aa78bcabbcbb9fe6b14fdc0..24170d0809ba9e45eb88fce04c0a7be4fe708b8b 100644 (file)
@@ -270,7 +270,6 @@ static struct apic apic_x2apic_cluster = {
 
        .get_apic_id                    = x2apic_get_apic_id,
        .set_apic_id                    = x2apic_set_apic_id,
-       .apic_id_mask                   = 0xFFFFFFFFu,
 
        .cpu_mask_to_apicid_and         = x2apic_cpu_mask_to_apicid_and,
 
index a1242e2c12e646d4eb2b805e4359ce9068e4db56..4f13f54f1b1f47ba75fb95cae226e1fe4978f241 100644 (file)
@@ -126,7 +126,6 @@ static struct apic apic_x2apic_phys = {
 
        .get_apic_id                    = x2apic_get_apic_id,
        .set_apic_id                    = x2apic_set_apic_id,
-       .apic_id_mask                   = 0xFFFFFFFFu,
 
        .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,
 
index 29003154fafd2277e4e014f5c1ac019f1a446c32..64dd38fbf21871997bcd70a3900b79ff9341c477 100644 (file)
@@ -582,7 +582,6 @@ static struct apic __refdata apic_x2apic_uv_x = {
 
        .get_apic_id                    = x2apic_get_apic_id,
        .set_apic_id                    = set_apic_id,
-       .apic_id_mask                   = 0xFFFFFFFFu,
 
        .cpu_mask_to_apicid_and         = uv_cpu_mask_to_apicid_and,
 
@@ -919,7 +918,7 @@ static void uv_heartbeat(unsigned long ignored)
        uv_set_scir_bits(bits);
 
        /* enable next timer period */
-       mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
+       mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }
 
 static void uv_heartbeat_enable(int cpu)
@@ -928,7 +927,7 @@ static void uv_heartbeat_enable(int cpu)
                struct timer_list *timer = &uv_cpu_scir_info(cpu)->timer;
 
                uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
-               setup_timer(timer, uv_heartbeat, cpu);
+               setup_pinned_timer(timer, uv_heartbeat, cpu);
                timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
                add_timer_on(timer, cpu);
                uv_cpu_scir_info(cpu)->enabled = 1;
index 674134e9f5e518c3662de5cc369d9a5092e921ac..2bd5c6ff7ee7c3634af74640f993c0104c405b48 100644 (file)
@@ -31,7 +31,9 @@ void common(void) {
        BLANK();
        OFFSET(TI_flags, thread_info, flags);
        OFFSET(TI_status, thread_info, status);
-       OFFSET(TI_addr_limit, thread_info, addr_limit);
+
+       BLANK();
+       OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
 
        BLANK();
        OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
index 0fe6953f421c9b92b72c331600f435e623c3e264..d22a7b9c4f0e5409b44b58b4acad6795c82429e5 100644 (file)
@@ -1452,7 +1452,7 @@ void cpu_init(void)
        struct task_struct *me;
        struct tss_struct *t;
        unsigned long v;
-       int cpu = stack_smp_processor_id();
+       int cpu = raw_smp_processor_id();
        int i;
 
        wait_for_master_cpu(cpu);
index 6e2ffbebbcdbd053a66ee5dbfa7e421c4281b322..c1a89bc026ac9f969c6f370e387460695929f33a 100644 (file)
@@ -300,15 +300,14 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
        }
 
        /*
-        * P4 Xeon errata 037 workaround.
+        * P4 Xeon erratum 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
                if (msr_set_bit(MSR_IA32_MISC_ENABLE,
-                               MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
-                   > 0) {
+                               MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
                        pr_info("CPU: C0 stepping P4 Xeon detected.\n");
-                       pr_info("CPU: Disabling hardware prefetching (Errata 037)\n");
+                       pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
                }
        }
 
index 34c89a3e8260a7acbfd29cb380fbc09fd2dad623..83f1a98d37dbc17608cbb72ffe3adc1c5b7e0548 100644 (file)
@@ -46,7 +46,7 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
                return;
 
        mce_setup(&m);
-       m.bank = 1;
+       m.bank = -1;
        /* Fake a memory read error with unknown channel */
        m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
 
index 92e5e37d97bf4b808a7e1824ad4fd573ccf075e1..79d8ec849468e70ab42d7f5bfc19497578208a79 100644 (file)
@@ -425,7 +425,7 @@ static u64 mce_rdmsrl(u32 msr)
        }
 
        if (rdmsrl_safe(msr, &v)) {
-               WARN_ONCE(1, "mce: Unable to read msr %d!\n", msr);
+               WARN_ONCE(1, "mce: Unable to read MSR 0x%x!\n", msr);
                /*
                 * Return zero in case the access faulted. This should
                 * not happen normally but can happen if the CPU does
@@ -1309,7 +1309,7 @@ static void __restart_timer(struct timer_list *t, unsigned long interval)
 
        if (timer_pending(t)) {
                if (time_before(when, t->expires))
-                       mod_timer_pinned(t, when);
+                       mod_timer(t, when);
        } else {
                t->expires = round_jiffies(when);
                add_timer_on(t, smp_processor_id());
@@ -1735,7 +1735,7 @@ static void __mcheck_cpu_init_timer(void)
        struct timer_list *t = this_cpu_ptr(&mce_timer);
        unsigned int cpu = smp_processor_id();
 
-       setup_timer(t, mce_timer_fn, cpu);
+       setup_pinned_timer(t, mce_timer_fn, cpu);
        mce_start_timer(cpu, t);
 }
 
index 10b0661651e0cea853164f0510e88bb0909c1de9..7b7f3be783d4e700bf11ef9c106554e72e7af799 100644 (file)
@@ -93,7 +93,7 @@ const char * const amd_df_mcablock_names[] = {
 EXPORT_SYMBOL_GPL(amd_df_mcablock_names);
 
 static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
-static DEFINE_PER_CPU(unsigned char, bank_map);        /* see which banks are on */
+static DEFINE_PER_CPU(unsigned int, bank_map); /* see which banks are on */
 
 static void amd_threshold_interrupt(void);
 static void amd_deferred_error_interrupt(void);
index f6f50c4ceaeceef16170d14d6d55376823bbd63f..cfa97ff67bdab29105946af41bcf5cc718c847fc 100644 (file)
@@ -39,9 +39,9 @@ __setup("nordrand", x86_rdrand_setup);
  */
 #define SANITY_CHECK_LOOPS 8
 
+#ifdef CONFIG_ARCH_RANDOM
 void x86_init_rdrand(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_ARCH_RANDOM
        unsigned long tmp;
        int i;
 
@@ -55,5 +55,5 @@ void x86_init_rdrand(struct cpuinfo_x86 *c)
                        return;
                }
        }
-#endif
 }
+#endif
index ef8017ca5ba9dab6c59e51ae91b8c353ddaa00da..92e8f0a7159cadbafba6763081c2b33293be420a 100644 (file)
@@ -87,7 +87,7 @@ static inline int valid_stack_ptr(struct task_struct *task,
                else
                        return 0;
        }
-       return p > t && p < t + THREAD_SIZE - size;
+       return p >= t && p < t + THREAD_SIZE - size;
 }
 
 unsigned long
@@ -98,6 +98,14 @@ print_context_stack(struct task_struct *task,
 {
        struct stack_frame *frame = (struct stack_frame *)bp;
 
+       /*
+        * If we overflowed the stack into a guard page, jump back to the
+        * bottom of the usable stack.
+        */
+       if ((unsigned long)task_stack_page(task) - (unsigned long)stack <
+           PAGE_SIZE)
+               stack = (unsigned long *)task_stack_page(task);
+
        while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
                unsigned long addr;
 
@@ -197,6 +205,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
        show_stack_log_lvl(task, NULL, sp, bp, "");
 }
 
+void show_stack_regs(struct pt_regs *regs)
+{
+       show_stack_log_lvl(current, regs, (unsigned long *)regs->sp, regs->bp, "");
+}
+
 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static int die_owner = -1;
 static unsigned int die_nest_count;
@@ -226,6 +239,8 @@ unsigned long oops_begin(void)
 EXPORT_SYMBOL_GPL(oops_begin);
 NOKPROBE_SYMBOL(oops_begin);
 
+void __noreturn rewind_stack_do_exit(int signr);
+
 void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 {
        if (regs && kexec_should_crash(current))
@@ -247,7 +262,13 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
-       do_exit(signr);
+
+       /*
+        * We're not going to return, but we might be on an IST stack or
+        * have very little stack space left.  Rewind the stack and kill
+        * the task.
+        */
+       rewind_stack_do_exit(signr);
 }
 NOKPROBE_SYMBOL(oops_end);
 
index fef917e79b9d614eeaca609c9c03e980bae2272f..948d77da3881c2b5bd65f661d5c9cb0b5537e544 100644 (file)
@@ -96,7 +96,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
        int i;
 
        if (sp == NULL) {
-               if (task)
+               if (regs)
+                       sp = (unsigned long *)regs->sp;
+               else if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
index d558a8a49016b2c6eccb50f3685cc136fdc621ad..6dede08dd98b4447542c1d81eaaaeac46a8ffb05 100644 (file)
@@ -264,7 +264,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
         * back trace for this cpu:
         */
        if (sp == NULL) {
-               if (task)
+               if (regs)
+                       sp = (unsigned long *)regs->sp;
+               else if (task)
                        sp = (unsigned long *)task->thread.sp;
                else
                        sp = (unsigned long *)&sp;
@@ -272,6 +274,8 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
+               unsigned long word;
+
                if (stack >= irq_stack && stack <= irq_stack_end) {
                        if (stack == irq_stack_end) {
                                stack = (unsigned long *) (irq_stack_end[-1]);
@@ -281,12 +285,18 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                if (kstack_end(stack))
                        break;
                }
+
+               if (probe_kernel_address(stack, word))
+                       break;
+
                if ((i % STACKSLOTS_PER_LINE) == 0) {
                        if (i != 0)
                                pr_cont("\n");
-                       printk("%s %016lx", log_lvl, *stack++);
+                       printk("%s %016lx", log_lvl, word);
                } else
-                       pr_cont(" %016lx", *stack++);
+                       pr_cont(" %016lx", word);
+
+               stack++;
                touch_nmi_watchdog();
        }
        preempt_enable();
index bca14c899137bfd754395772a37fbce01f721119..57b71373bae30c47e02a6806ef1a757f111fcd8e 100644 (file)
 
 #include <linux/pci.h>
 #include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
 #include <linux/pci_ids.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
 #include <drm/i915_drm.h>
 #include <asm/pci-direct.h>
 #include <asm/dma.h>
@@ -21,6 +25,9 @@
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/irq_remapping.h>
+#include <asm/early_ioremap.h>
+
+#define dev_err(msg)  pr_err("pci 0000:%02x:%02x.%d: %s", bus, slot, func, msg)
 
 static void __init fix_hypertransport_config(int num, int slot, int func)
 {
@@ -75,6 +82,13 @@ static void __init nvidia_bugs(int num, int slot, int func)
 {
 #ifdef CONFIG_ACPI
 #ifdef CONFIG_X86_IO_APIC
+       /*
+        * Only applies to Nvidia root ports (bus 0) and not to
+        * Nvidia graphics cards with PCI ports on secondary buses.
+        */
+       if (num)
+               return;
+
        /*
         * All timer overrides on Nvidia are
         * wrong unless HPET is enabled.
@@ -590,6 +604,61 @@ static void __init force_disable_hpet(int num, int slot, int func)
 #endif
 }
 
+#define BCM4331_MMIO_SIZE      16384
+#define BCM4331_PM_CAP         0x40
+#define bcma_aread32(reg)      ioread32(mmio + 1 * BCMA_CORE_SIZE + reg)
+#define bcma_awrite32(reg, val)        iowrite32(val, mmio + 1 * BCMA_CORE_SIZE + reg)
+
+static void __init apple_airport_reset(int bus, int slot, int func)
+{
+       void __iomem *mmio;
+       u16 pmcsr;
+       u64 addr;
+       int i;
+
+       if (!dmi_match(DMI_SYS_VENDOR, "Apple Inc."))
+               return;
+
+       /* Card may have been put into PCI_D3hot by grub quirk */
+       pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+
+       if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+               pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+               write_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL, pmcsr);
+               mdelay(10);
+
+               pmcsr = read_pci_config_16(bus, slot, func, BCM4331_PM_CAP + PCI_PM_CTRL);
+               if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) {
+                       dev_err("Cannot power up Apple AirPort card\n");
+                       return;
+               }
+       }
+
+       addr  =      read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
+       addr |= (u64)read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_1) << 32;
+       addr &= PCI_BASE_ADDRESS_MEM_MASK;
+
+       mmio = early_ioremap(addr, BCM4331_MMIO_SIZE);
+       if (!mmio) {
+               dev_err("Cannot iomap Apple AirPort card\n");
+               return;
+       }
+
+       pr_info("Resetting Apple AirPort card (left enabled by EFI)\n");
+
+       for (i = 0; bcma_aread32(BCMA_RESET_ST) && i < 30; i++)
+               udelay(10);
+
+       bcma_awrite32(BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+       bcma_aread32(BCMA_RESET_CTL);
+       udelay(1);
+
+       bcma_awrite32(BCMA_RESET_CTL, 0);
+       bcma_aread32(BCMA_RESET_CTL);
+       udelay(10);
+
+       early_iounmap(mmio, BCM4331_MMIO_SIZE);
+}
 
 #define QFLAG_APPLY_ONCE       0x1
 #define QFLAG_APPLIED          0x2
@@ -603,12 +672,6 @@ struct chipset {
        void (*f)(int num, int slot, int func);
 };
 
-/*
- * Only works for devices on the root bus. If you add any devices
- * not on bus 0 readd another loop level in early_quirks(). But
- * be careful because at least the Nvidia quirk here relies on
- * only matching on bus 0.
- */
 static struct chipset early_qrk[] __initdata = {
        { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
          PCI_CLASS_BRIDGE_PCI, PCI_ANY_ID, QFLAG_APPLY_ONCE, nvidia_bugs },
@@ -638,9 +701,13 @@ static struct chipset early_qrk[] __initdata = {
         */
        { PCI_VENDOR_ID_INTEL, 0x0f00,
                PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
+       { PCI_VENDOR_ID_BROADCOM, 0x4331,
+         PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset},
        {}
 };
 
+static void __init early_pci_scan_bus(int bus);
+
 /**
  * check_dev_quirk - apply early quirks to a given PCI device
  * @num: bus number
@@ -649,7 +716,7 @@ static struct chipset early_qrk[] __initdata = {
  *
  * Check the vendor & device ID against the early quirks table.
  *
- * If the device is single function, let early_quirks() know so we don't
+ * If the device is single function, let early_pci_scan_bus() know so we don't
  * poke at this device again.
  */
 static int __init check_dev_quirk(int num, int slot, int func)
@@ -658,6 +725,7 @@ static int __init check_dev_quirk(int num, int slot, int func)
        u16 vendor;
        u16 device;
        u8 type;
+       u8 sec;
        int i;
 
        class = read_pci_config_16(num, slot, func, PCI_CLASS_DEVICE);
@@ -685,25 +753,36 @@ static int __init check_dev_quirk(int num, int slot, int func)
 
        type = read_pci_config_byte(num, slot, func,
                                    PCI_HEADER_TYPE);
+
+       if ((type & 0x7f) == PCI_HEADER_TYPE_BRIDGE) {
+               sec = read_pci_config_byte(num, slot, func, PCI_SECONDARY_BUS);
+               if (sec > num)
+                       early_pci_scan_bus(sec);
+       }
+
        if (!(type & 0x80))
                return -1;
 
        return 0;
 }
 
-void __init early_quirks(void)
+static void __init early_pci_scan_bus(int bus)
 {
        int slot, func;
 
-       if (!early_pci_allowed())
-               return;
-
        /* Poor man's PCI discovery */
-       /* Only scan the root bus */
        for (slot = 0; slot < 32; slot++)
                for (func = 0; func < 8; func++) {
                        /* Only probe function 0 on single fn devices */
-                       if (check_dev_quirk(0, slot, func))
+                       if (check_dev_quirk(bus, slot, func))
                                break;
                }
 }
+
+void __init early_quirks(void)
+{
+       if (!early_pci_allowed())
+               return;
+
+       early_pci_scan_bus(0);
+}
index afe65dffee80b80bda7dc1f69072fc23d21472b3..4312f8ae71b78515885d8415ee5064034919b8d5 100644 (file)
@@ -6,66 +6,92 @@
 #include <asm/bios_ebda.h>
 
 /*
+ * This function reserves all conventional PC system BIOS related
+ * firmware memory areas (some of which are data, some of which
+ * are code), that must not be used by the kernel as available
+ * RAM.
+ *
  * The BIOS places the EBDA/XBDA at the top of conventional
  * memory, and usually decreases the reported amount of
- * conventional memory (int 0x12) too. This also contains a
- * workaround for Dell systems that neglect to reserve EBDA.
- * The same workaround also avoids a problem with the AMD768MPX
- * chipset: reserve a page before VGA to prevent PCI prefetch
- * into it (errata #56). Usually the page is reserved anyways,
- * unless you have no PS/2 mouse plugged in.
+ * conventional memory (int 0x12) too.
+ *
+ * This means that as a first approximation on most systems we can
+ * guess the reserved BIOS area by looking at the low BIOS RAM size
+ * value and assume that everything above that value (up to 1MB) is
+ * reserved.
+ *
+ * But life in firmware country is not that simple:
+ *
+ * - This code also contains a quirk for Dell systems that neglect
+ *   to reserve the EBDA area in the 'RAM size' value ...
+ *
+ * - The same quirk also avoids a problem with the AMD768MPX
+ *   chipset: reserve a page before VGA to prevent PCI prefetch
+ *   into it (errata #56). (Usually the page is reserved anyways,
+ *   unless you have no PS/2 mouse plugged in.)
+ *
+ * - Plus paravirt systems don't have a reliable value in the
+ *   'BIOS RAM size' pointer we can rely on, so we must quirk
+ *   them too.
+ *
+ * Due to those various problems this function is deliberately
+ * very conservative and tries to err on the side of reserving
+ * too much, to not risk reserving too little.
+ *
+ * Losing a small amount of memory in the bottom megabyte is
+ * rarely a problem, as long as we have enough memory to install
+ * the SMP bootup trampoline which *must* be in this area.
  *
- * This functions is deliberately very conservative.  Losing
- * memory in the bottom megabyte is rarely a problem, as long
- * as we have enough memory to install the trampoline.  Using
- * memory that is in use by the BIOS or by some DMA device
- * the BIOS didn't shut down *is* a big problem.
+ * Using memory that is in use by the BIOS or by some DMA device
+ * the BIOS didn't shut down *is* a big problem to the kernel,
+ * obviously.
  */
 
-#define BIOS_LOWMEM_KILOBYTES  0x413
-#define LOWMEM_CAP             0x9f000U        /* Absolute maximum */
-#define INSANE_CUTOFF          0x20000U        /* Less than this = insane */
+#define BIOS_RAM_SIZE_KB_PTR   0x413
 
-void __init reserve_ebda_region(void)
+#define BIOS_START_MIN         0x20000U        /* 128K, less than this is insane */
+#define BIOS_START_MAX         0x9f000U        /* 640K, absolute maximum */
+
+void __init reserve_bios_regions(void)
 {
-       unsigned int lowmem, ebda_addr;
+       unsigned int bios_start, ebda_start;
 
        /*
-        * To determine the position of the EBDA and the
-        * end of conventional memory, we need to look at
-        * the BIOS data area. In a paravirtual environment
-        * that area is absent. We'll just have to assume
-        * that the paravirt case can handle memory setup
-        * correctly, without our help.
+        * NOTE: In a paravirtual environment the BIOS reserved
+        * area is absent. We'll just have to assume that the
+        * paravirt case can handle memory setup correctly,
+        * without our help.
         */
-       if (!x86_platform.legacy.ebda_search)
+       if (!x86_platform.legacy.reserve_bios_regions)
                return;
 
-       /* end of low (conventional) memory */
-       lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
-       lowmem <<= 10;
-
-       /* start of EBDA area */
-       ebda_addr = get_bios_ebda();
-
        /*
-        * Note: some old Dells seem to need 4k EBDA without
-        * reporting so, so just consider the memory above 0x9f000
-        * to be off limits (bugzilla 2990).
+        * BIOS RAM size is encoded in kilobytes, convert it
+        * to bytes to get a first guess at where the BIOS
+        * firmware area starts:
         */
+       bios_start = *(unsigned short *)__va(BIOS_RAM_SIZE_KB_PTR);
+       bios_start <<= 10;
 
-       /* If the EBDA address is below 128K, assume it is bogus */
-       if (ebda_addr < INSANE_CUTOFF)
-               ebda_addr = LOWMEM_CAP;
+       /*
+        * If bios_start is less than 128K, assume it is bogus
+        * and bump it up to 640K.  Similarly, if bios_start is above 640K,
+        * don't trust it.
+        */
+       if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
+               bios_start = BIOS_START_MAX;
 
-       /* If lowmem is less than 128K, assume it is bogus */
-       if (lowmem < INSANE_CUTOFF)
-               lowmem = LOWMEM_CAP;
+       /* Get the start address of the EBDA page: */
+       ebda_start = get_bios_ebda();
 
-       /* Use the lower of the lowmem and EBDA markers as the cutoff */
-       lowmem = min(lowmem, ebda_addr);
-       lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
+       /*
+        * If the EBDA start address is sane and is below the BIOS region,
+        * then also reserve everything from the EBDA start address up to
+        * the BIOS region.
+        */
+       if (ebda_start >= BIOS_START_MIN && ebda_start < bios_start)
+               bios_start = ebda_start;
 
-       /* reserve all memory between lowmem and the 1MB mark */
-       memblock_reserve(lowmem, 0x100000 - lowmem);
+       /* Reserve all memory between bios_start and the 1MB mark: */
+       memblock_reserve(bios_start, 0x100000 - bios_start);
 }
index 97027545a72dcd4c34964aff481ac1b7a94c0df7..3fc03a09a93b1710b966a91ba8ae65750abc2f95 100644 (file)
@@ -8,10 +8,14 @@
 #include <asm/fpu/internal.h>
 #include <asm/fpu/regset.h>
 #include <asm/fpu/signal.h>
+#include <asm/fpu/types.h>
 #include <asm/traps.h>
 
 #include <linux/hardirq.h>
 
+#define CREATE_TRACE_POINTS
+#include <asm/trace/fpu.h>
+
 /*
  * Represents the initial FPU state. It's mostly (but not completely) zeroes,
  * depending on the FPU hardware format:
@@ -192,6 +196,7 @@ void fpu__save(struct fpu *fpu)
        WARN_ON_FPU(fpu != &current->thread.fpu);
 
        preempt_disable();
+       trace_x86_fpu_before_save(fpu);
        if (fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(fpu)) {
                        if (use_eager_fpu())
@@ -200,6 +205,7 @@ void fpu__save(struct fpu *fpu)
                                fpregs_deactivate(fpu);
                }
        }
+       trace_x86_fpu_after_save(fpu);
        preempt_enable();
 }
 EXPORT_SYMBOL_GPL(fpu__save);
@@ -222,7 +228,14 @@ void fpstate_init(union fpregs_state *state)
                return;
        }
 
-       memset(state, 0, xstate_size);
+       memset(state, 0, fpu_kernel_xstate_size);
+
+       /*
+        * XRSTORS requires that this bit is set in xcomp_bv, or
+        * it will #GP. Make sure it is replaced after the memset().
+        */
+       if (static_cpu_has(X86_FEATURE_XSAVES))
+               state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
 
        if (static_cpu_has(X86_FEATURE_FXSR))
                fpstate_init_fxstate(&state->fxsave);
@@ -247,7 +260,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
         * leak into the child task:
         */
        if (use_eager_fpu())
-               memset(&dst_fpu->state.xsave, 0, xstate_size);
+               memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
 
        /*
         * Save current FPU registers directly into the child
@@ -266,7 +279,8 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
         */
        preempt_disable();
        if (!copy_fpregs_to_fpstate(dst_fpu)) {
-               memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
+               memcpy(&src_fpu->state, &dst_fpu->state,
+                      fpu_kernel_xstate_size);
 
                if (use_eager_fpu())
                        copy_kernel_to_fpregs(&src_fpu->state);
@@ -275,6 +289,9 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
        }
        preempt_enable();
 
+       trace_x86_fpu_copy_src(src_fpu);
+       trace_x86_fpu_copy_dst(dst_fpu);
+
        return 0;
 }
 
@@ -288,7 +305,9 @@ void fpu__activate_curr(struct fpu *fpu)
 
        if (!fpu->fpstate_active) {
                fpstate_init(&fpu->state);
+               trace_x86_fpu_init_state(fpu);
 
+               trace_x86_fpu_activate_state(fpu);
                /* Safe to do for the current task: */
                fpu->fpstate_active = 1;
        }
@@ -314,7 +333,9 @@ void fpu__activate_fpstate_read(struct fpu *fpu)
        } else {
                if (!fpu->fpstate_active) {
                        fpstate_init(&fpu->state);
+                       trace_x86_fpu_init_state(fpu);
 
+                       trace_x86_fpu_activate_state(fpu);
                        /* Safe to do for current and for stopped child tasks: */
                        fpu->fpstate_active = 1;
                }
@@ -347,7 +368,9 @@ void fpu__activate_fpstate_write(struct fpu *fpu)
                fpu->last_cpu = -1;
        } else {
                fpstate_init(&fpu->state);
+               trace_x86_fpu_init_state(fpu);
 
+               trace_x86_fpu_activate_state(fpu);
                /* Safe to do for stopped child tasks: */
                fpu->fpstate_active = 1;
        }
@@ -432,9 +455,11 @@ void fpu__restore(struct fpu *fpu)
 
        /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
        kernel_fpu_disable();
+       trace_x86_fpu_before_restore(fpu);
        fpregs_activate(fpu);
        copy_kernel_to_fpregs(&fpu->state);
        fpu->counter++;
+       trace_x86_fpu_after_restore(fpu);
        kernel_fpu_enable();
 }
 EXPORT_SYMBOL_GPL(fpu__restore);
@@ -463,6 +488,8 @@ void fpu__drop(struct fpu *fpu)
 
        fpu->fpstate_active = 0;
 
+       trace_x86_fpu_dropped(fpu);
+
        preempt_enable();
 }
 
index aacfd7a82cec57b9f2eb2f57e17d277a9cd74141..93982aebb39896224b28177c3212f37ca110dc70 100644 (file)
@@ -145,8 +145,8 @@ static void __init fpu__init_system_generic(void)
  * This is inherent to the XSAVE architecture which puts all state
  * components into a single, continuous memory block:
  */
-unsigned int xstate_size;
-EXPORT_SYMBOL_GPL(xstate_size);
+unsigned int fpu_kernel_xstate_size;
+EXPORT_SYMBOL_GPL(fpu_kernel_xstate_size);
 
 /* Get alignment of the TYPE. */
 #define TYPE_ALIGN(TYPE) offsetof(struct { char x; TYPE test; }, test)
@@ -178,7 +178,7 @@ static void __init fpu__init_task_struct_size(void)
         * Add back the dynamically-calculated register state
         * size.
         */
-       task_size += xstate_size;
+       task_size += fpu_kernel_xstate_size;
 
        /*
         * We dynamically size 'struct fpu', so we require that
@@ -195,7 +195,7 @@ static void __init fpu__init_task_struct_size(void)
 }
 
 /*
- * Set up the xstate_size based on the legacy FPU context size.
+ * Set up the user and kernel xstate sizes based on the legacy FPU context size.
  *
  * We set this up first, and later it will be overwritten by
  * fpu__init_system_xstate() if the CPU knows about xstates.
@@ -208,7 +208,7 @@ static void __init fpu__init_system_xstate_size_legacy(void)
        on_boot_cpu = 0;
 
        /*
-        * Note that xstate_size might be overwriten later during
+        * Note that xstate sizes might be overwritten later during
         * fpu__init_system_xstate().
         */
 
@@ -219,27 +219,17 @@ static void __init fpu__init_system_xstate_size_legacy(void)
                 */
                setup_clear_cpu_cap(X86_FEATURE_XSAVE);
                setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-               xstate_size = sizeof(struct swregs_state);
+               fpu_kernel_xstate_size = sizeof(struct swregs_state);
        } else {
                if (boot_cpu_has(X86_FEATURE_FXSR))
-                       xstate_size = sizeof(struct fxregs_state);
+                       fpu_kernel_xstate_size =
+                               sizeof(struct fxregs_state);
                else
-                       xstate_size = sizeof(struct fregs_state);
+                       fpu_kernel_xstate_size =
+                               sizeof(struct fregs_state);
        }
-       /*
-        * Quirk: we don't yet handle the XSAVES* instructions
-        * correctly, as we don't correctly convert between
-        * standard and compacted format when interfacing
-        * with user-space - so disable it for now.
-        *
-        * The difference is small: with recent CPUs the
-        * compacted format is only marginally smaller than
-        * the standard FPU state format.
-        *
-        * ( This is easy to backport while we are fixing
-        *   XSAVES* support. )
-        */
-       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+
+       fpu_user_xstate_size = fpu_kernel_xstate_size;
 }
 
 /*
index 81422dfb152b7c8e012300637b1acfd40384f697..c114b132d121783545cd938f0f77979727d213ad 100644 (file)
@@ -4,6 +4,7 @@
 #include <asm/fpu/internal.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/regset.h>
+#include <asm/fpu/xstate.h>
 
 /*
  * The xstateregs_active() routine is the same as the regset_fpregs_active() routine,
@@ -85,21 +86,26 @@ int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
        if (!boot_cpu_has(X86_FEATURE_XSAVE))
                return -ENODEV;
 
-       fpu__activate_fpstate_read(fpu);
-
        xsave = &fpu->state.xsave;
 
-       /*
-        * Copy the 48bytes defined by the software first into the xstate
-        * memory layout in the thread struct, so that we can copy the entire
-        * xstateregs to the user using one user_regset_copyout().
-        */
-       memcpy(&xsave->i387.sw_reserved,
-               xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
-       /*
-        * Copy the xstate memory layout.
-        */
-       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+       fpu__activate_fpstate_read(fpu);
+
+       if (using_compacted_format()) {
+               ret = copyout_from_xsaves(pos, count, kbuf, ubuf, xsave);
+       } else {
+               fpstate_sanitize_xstate(fpu);
+               /*
+                * Copy the 48 bytes defined by the software into the xsave
+                * area in the thread struct, so that we can copy the whole
+                * area to user using one user_regset_copyout().
+                */
+               memcpy(&xsave->i387.sw_reserved, xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
+
+               /*
+                * Copy the xstate memory layout.
+                */
+               ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+       }
        return ret;
 }
 
@@ -114,11 +120,27 @@ int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
        if (!boot_cpu_has(X86_FEATURE_XSAVE))
                return -ENODEV;
 
-       fpu__activate_fpstate_write(fpu);
+       /*
+        * A whole standard-format XSAVE buffer is needed:
+        */
+       if ((pos != 0) || (count < fpu_user_xstate_size))
+               return -EFAULT;
 
        xsave = &fpu->state.xsave;
 
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+       fpu__activate_fpstate_write(fpu);
+
+       if (boot_cpu_has(X86_FEATURE_XSAVES))
+               ret = copyin_to_xsaves(kbuf, ubuf, xsave);
+       else
+               ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+
+       /*
+        * In case of failure, mark all states as init:
+        */
+       if (ret)
+               fpstate_init(&fpu->state);
+
        /*
         * mxcsr reserved bits must be masked to zero for security reasons.
         */
index 31c6a60505e6bc6e5acb84df728d45f73ef701aa..9e231d88bb336e3585325c06e15bcd3842054ece 100644 (file)
@@ -8,8 +8,10 @@
 #include <asm/fpu/internal.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/regset.h>
+#include <asm/fpu/xstate.h>
 
 #include <asm/sigframe.h>
+#include <asm/trace/fpu.h>
 
 static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
 
@@ -31,7 +33,7 @@ static inline int check_for_xstate(struct fxregs_state __user *buf,
        /* Check for the first magic field and other error scenarios. */
        if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
            fx_sw->xstate_size < min_xstate_size ||
-           fx_sw->xstate_size > xstate_size ||
+           fx_sw->xstate_size > fpu_user_xstate_size ||
            fx_sw->xstate_size > fx_sw->extended_size)
                return -1;
 
@@ -88,7 +90,8 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
        if (!use_xsave())
                return err;
 
-       err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
+       err |= __put_user(FP_XSTATE_MAGIC2,
+                         (__u32 *)(buf + fpu_user_xstate_size));
 
        /*
         * Read the xfeatures which we copied (directly from the cpu or
@@ -125,7 +128,7 @@ static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
        else
                err = copy_fregs_to_user((struct fregs_state __user *) buf);
 
-       if (unlikely(err) && __clear_user(buf, xstate_size))
+       if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
                err = -EFAULT;
        return err;
 }
@@ -167,7 +170,7 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
                        sizeof(struct user_i387_ia32_struct), NULL,
                        (struct _fpstate_32 __user *) buf) ? -1 : 1;
 
-       if (fpregs_active()) {
+       if (fpregs_active() || using_compacted_format()) {
                /* Save the live register state to the user directly. */
                if (copy_fpregs_to_sigframe(buf_fx))
                        return -1;
@@ -175,8 +178,19 @@ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
                if (ia32_fxstate)
                        copy_fxregs_to_kernel(&tsk->thread.fpu);
        } else {
+               /*
+                * It is a *bug* if kernel uses compacted-format for xsave
+                * area and we copy it out directly to a signal frame. It
+                * should have been handled above by saving the registers
+                * directly.
+                */
+               if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+                       WARN_ONCE(1, "x86/fpu: saving compacted-format xsave area to a signal frame!\n");
+                       return -1;
+               }
+
                fpstate_sanitize_xstate(&tsk->thread.fpu);
-               if (__copy_to_user(buf_fx, xsave, xstate_size))
+               if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
                        return -1;
        }
 
@@ -250,7 +264,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
        int ia32_fxstate = (buf != buf_fx);
        struct task_struct *tsk = current;
        struct fpu *fpu = &tsk->thread.fpu;
-       int state_size = xstate_size;
+       int state_size = fpu_kernel_xstate_size;
        u64 xfeatures = 0;
        int fx_only = 0;
 
@@ -282,6 +296,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                         */
                        state_size = sizeof(struct fxregs_state);
                        fx_only = 1;
+                       trace_x86_fpu_xstate_check_failed(fpu);
                } else {
                        state_size = fx_sw_user.xstate_size;
                        xfeatures = fx_sw_user.xfeatures;
@@ -308,9 +323,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                 */
                fpu__drop(fpu);
 
-               if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
-                   __copy_from_user(&env, buf, sizeof(env))) {
+               if (using_compacted_format()) {
+                       err = copyin_to_xsaves(NULL, buf_fx,
+                                              &fpu->state.xsave);
+               } else {
+                       err = __copy_from_user(&fpu->state.xsave,
+                                              buf_fx, state_size);
+               }
+
+               if (err || __copy_from_user(&env, buf, sizeof(env))) {
                        fpstate_init(&fpu->state);
+                       trace_x86_fpu_init_state(fpu);
                        err = -1;
                } else {
                        sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
@@ -341,7 +364,8 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
 
 static inline int xstate_sigframe_size(void)
 {
-       return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
+       return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
+                       fpu_user_xstate_size;
 }
 
 /*
@@ -385,12 +409,12 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
  */
 void fpu__init_prepare_fx_sw_frame(void)
 {
-       int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
+       int size = fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE;
 
        fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
        fx_sw_reserved.extended_size = size;
        fx_sw_reserved.xfeatures = xfeatures_mask;
-       fx_sw_reserved.xstate_size = xstate_size;
+       fx_sw_reserved.xstate_size = fpu_user_xstate_size;
 
        if (config_enabled(CONFIG_IA32_EMULATION) ||
            config_enabled(CONFIG_X86_32)) {
index 4ea2a59483c7b1b07c60178daa55b084337dcf07..680049aa4593ca773d9860a2b8af77eab3839f31 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/fpu/internal.h>
 #include <asm/fpu/signal.h>
 #include <asm/fpu/regset.h>
+#include <asm/fpu/xstate.h>
 
 #include <asm/tlbflush.h>
 
@@ -43,6 +44,13 @@ static unsigned int xstate_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] =
 static unsigned int xstate_sizes[XFEATURE_MAX]   = { [ 0 ... XFEATURE_MAX - 1] = -1};
 static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];
 
+/*
+ * The XSAVE area of kernel can be in standard or compacted format;
+ * it is always in standard format for user mode. This is the user
+ * mode standard format size used for signal and ptrace frames.
+ */
+unsigned int fpu_user_xstate_size;
+
 /*
  * Clear all of the X86_FEATURE_* bits that are unavailable
  * when the CPU has no XSAVE support.
@@ -105,6 +113,27 @@ int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
 }
 EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
 
+static int xfeature_is_supervisor(int xfeature_nr)
+{
+       /*
+        * We currently do not support supervisor states, but if
+        * we did, we could find out like this.
+        *
+        * SDM says: If state component 'i' is a user state component,
+        * ECX[0] return 0; if state component i is a supervisor
+        * state component, ECX[0] returns 1.
+        */
+       u32 eax, ebx, ecx, edx;
+
+       cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
+       return !!(ecx & 1);
+}
+
+static int xfeature_is_user(int xfeature_nr)
+{
+       return !xfeature_is_supervisor(xfeature_nr);
+}
+
 /*
  * When executing XSAVEOPT (or other optimized XSAVE instructions), if
  * a processor implementation detects that an FPU state component is still
@@ -171,7 +200,7 @@ void fpstate_sanitize_xstate(struct fpu *fpu)
         */
        while (xfeatures) {
                if (xfeatures & 0x1) {
-                       int offset = xstate_offsets[feature_bit];
+                       int offset = xstate_comp_offsets[feature_bit];
                        int size = xstate_sizes[feature_bit];
 
                        memcpy((void *)fx + offset,
@@ -192,6 +221,15 @@ void fpu__init_cpu_xstate(void)
 {
        if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask)
                return;
+       /*
+        * Make it clear that XSAVES supervisor states are not yet
+        * implemented should anyone expect it to work by changing
+        * bits in XFEATURE_MASK_* macros and XCR0.
+        */
+       WARN_ONCE((xfeatures_mask & XFEATURE_MASK_SUPERVISOR),
+               "x86/fpu: XSAVES supervisor states are not yet implemented.\n");
+
+       xfeatures_mask &= ~XFEATURE_MASK_SUPERVISOR;
 
        cr4_set_bits(X86_CR4_OSXSAVE);
        xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
@@ -217,13 +255,29 @@ static void __init setup_xstate_features(void)
        /* start at the beginnning of the "extended state" */
        unsigned int last_good_offset = offsetof(struct xregs_state,
                                                 extended_state_area);
+       /*
+        * The FP xstates and SSE xstates are legacy states. They are always
+        * in the fixed offsets in the xsave area in either compacted form
+        * or standard form.
+        */
+       xstate_offsets[0] = 0;
+       xstate_sizes[0] = offsetof(struct fxregs_state, xmm_space);
+       xstate_offsets[1] = xstate_sizes[0];
+       xstate_sizes[1] = FIELD_SIZEOF(struct fxregs_state, xmm_space);
 
        for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
                if (!xfeature_enabled(i))
                        continue;
 
                cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
-               xstate_offsets[i] = ebx;
+
+               /*
+                * If an xfeature is supervisor state, the offset
+                * in EBX is invalid. We leave it to -1.
+                */
+               if (xfeature_is_user(i))
+                       xstate_offsets[i] = ebx;
+
                xstate_sizes[i] = eax;
                /*
                 * In our xstate size checks, we assume that the
@@ -233,8 +287,6 @@ static void __init setup_xstate_features(void)
                WARN_ONCE(last_good_offset > xstate_offsets[i],
                        "x86/fpu: misordered xstate at %d\n", last_good_offset);
                last_good_offset = xstate_offsets[i];
-
-               printk(KERN_INFO "x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n", i, ebx, i, eax);
        }
 }
 
@@ -262,6 +314,33 @@ static void __init print_xstate_features(void)
        print_xstate_feature(XFEATURE_MASK_PKRU);
 }
 
+/*
+ * This check is important because it is easy to get XSTATE_*
+ * confused with XSTATE_BIT_*.
+ */
+#define CHECK_XFEATURE(nr) do {                \
+       WARN_ON(nr < FIRST_EXTENDED_XFEATURE);  \
+       WARN_ON(nr >= XFEATURE_MAX);    \
+} while (0)
+
+/*
+ * We could cache this like xstate_size[], but we only use
+ * it here, so it would be a waste of space.
+ */
+static int xfeature_is_aligned(int xfeature_nr)
+{
+       u32 eax, ebx, ecx, edx;
+
+       CHECK_XFEATURE(xfeature_nr);
+       cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
+       /*
+        * The value returned by ECX[1] indicates the alignment
+        * of state component 'i' when the compacted format
+        * of the extended region of an XSAVE area is used:
+        */
+       return !!(ecx & 2);
+}
+
 /*
  * This function sets up offsets and sizes of all extended states in
  * xsave area. This supports both standard format and compacted format
@@ -299,10 +378,29 @@ static void __init setup_xstate_comp(void)
                else
                        xstate_comp_sizes[i] = 0;
 
-               if (i > FIRST_EXTENDED_XFEATURE)
+               if (i > FIRST_EXTENDED_XFEATURE) {
                        xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
                                        + xstate_comp_sizes[i-1];
 
+                       if (xfeature_is_aligned(i))
+                               xstate_comp_offsets[i] =
+                                       ALIGN(xstate_comp_offsets[i], 64);
+               }
+       }
+}
+
+/*
+ * Print out xstate component offsets and sizes
+ */
+static void __init print_xstate_offset_size(void)
+{
+       int i;
+
+       for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
+               if (!xfeature_enabled(i))
+                       continue;
+               pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
+                        i, xstate_comp_offsets[i], i, xstate_sizes[i]);
        }
 }
 
@@ -322,13 +420,11 @@ static void __init setup_init_fpu_buf(void)
        setup_xstate_features();
        print_xstate_features();
 
-       if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+       if (boot_cpu_has(X86_FEATURE_XSAVES))
                init_fpstate.xsave.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
-               init_fpstate.xsave.header.xfeatures = xfeatures_mask;
-       }
 
        /*
-        * Init all the features state with header_bv being 0x0
+        * Init all the features state with header.xfeatures being 0x0
         */
        copy_kernel_to_xregs_booting(&init_fpstate.xsave);
 
@@ -339,58 +435,19 @@ static void __init setup_init_fpu_buf(void)
        copy_xregs_to_kernel_booting(&init_fpstate.xsave);
 }
 
-static int xfeature_is_supervisor(int xfeature_nr)
-{
-       /*
-        * We currently do not support supervisor states, but if
-        * we did, we could find out like this.
-        *
-        * SDM says: If state component i is a user state component,
-        * ECX[0] return 0; if state component i is a supervisor
-        * state component, ECX[0] returns 1.
-       u32 eax, ebx, ecx, edx;
-       cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx;
-       return !!(ecx & 1);
-       */
-       return 0;
-}
-/*
-static int xfeature_is_user(int xfeature_nr)
-{
-       return !xfeature_is_supervisor(xfeature_nr);
-}
-*/
-
-/*
- * This check is important because it is easy to get XSTATE_*
- * confused with XSTATE_BIT_*.
- */
-#define CHECK_XFEATURE(nr) do {                \
-       WARN_ON(nr < FIRST_EXTENDED_XFEATURE);  \
-       WARN_ON(nr >= XFEATURE_MAX);    \
-} while (0)
-
-/*
- * We could cache this like xstate_size[], but we only use
- * it here, so it would be a waste of space.
- */
-static int xfeature_is_aligned(int xfeature_nr)
+static int xfeature_uncompacted_offset(int xfeature_nr)
 {
        u32 eax, ebx, ecx, edx;
 
-       CHECK_XFEATURE(xfeature_nr);
-       cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
        /*
-        * The value returned by ECX[1] indicates the alignment
-        * of state component i when the compacted format
-        * of the extended region of an XSAVE area is used
+        * Only XSAVES supports supervisor states and it uses compacted
+        * format. Checking a supervisor state's uncompacted offset is
+        * an error.
         */
-       return !!(ecx & 2);
-}
-
-static int xfeature_uncompacted_offset(int xfeature_nr)
-{
-       u32 eax, ebx, ecx, edx;
+       if (XFEATURE_MASK_SUPERVISOR & (1 << xfeature_nr)) {
+               WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
+               return -1;
+       }
 
        CHECK_XFEATURE(xfeature_nr);
        cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
@@ -415,7 +472,7 @@ static int xfeature_size(int xfeature_nr)
  * that it is obvious which aspect of 'XSAVES' is being handled
  * by the calling code.
  */
-static int using_compacted_format(void)
+int using_compacted_format(void)
 {
        return boot_cpu_has(X86_FEATURE_XSAVES);
 }
@@ -530,11 +587,12 @@ static void do_extra_xstate_size_checks(void)
                 */
                paranoid_xstate_size += xfeature_size(i);
        }
-       XSTATE_WARN_ON(paranoid_xstate_size != xstate_size);
+       XSTATE_WARN_ON(paranoid_xstate_size != fpu_kernel_xstate_size);
 }
 
+
 /*
- * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
+ * Get total size of enabled xstates in XCR0/xfeatures_mask.
  *
  * Note the SDM's wording here.  "sub-function 0" only enumerates
  * the size of the *user* states.  If we use it to size a buffer
@@ -544,34 +602,33 @@ static void do_extra_xstate_size_checks(void)
  * Note that we do not currently set any bits on IA32_XSS so
  * 'XCR0 | IA32_XSS == XCR0' for now.
  */
-static unsigned int __init calculate_xstate_size(void)
+static unsigned int __init get_xsaves_size(void)
 {
        unsigned int eax, ebx, ecx, edx;
-       unsigned int calculated_xstate_size;
+       /*
+        * - CPUID function 0DH, sub-function 1:
+        *    EBX enumerates the size (in bytes) required by
+        *    the XSAVES instruction for an XSAVE area
+        *    containing all the state components
+        *    corresponding to bits currently set in
+        *    XCR0 | IA32_XSS.
+        */
+       cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
+       return ebx;
+}
 
-       if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
-               /*
-                * - CPUID function 0DH, sub-function 0:
-                *    EBX enumerates the size (in bytes) required by
-                *    the XSAVE instruction for an XSAVE area
-                *    containing all the *user* state components
-                *    corresponding to bits currently set in XCR0.
-                */
-               cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
-               calculated_xstate_size = ebx;
-       } else {
-               /*
-                * - CPUID function 0DH, sub-function 1:
-                *    EBX enumerates the size (in bytes) required by
-                *    the XSAVES instruction for an XSAVE area
-                *    containing all the state components
-                *    corresponding to bits currently set in
-                *    XCR0 | IA32_XSS.
-                */
-               cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
-               calculated_xstate_size = ebx;
-       }
-       return calculated_xstate_size;
+static unsigned int __init get_xsave_size(void)
+{
+       unsigned int eax, ebx, ecx, edx;
+       /*
+        * - CPUID function 0DH, sub-function 0:
+        *    EBX enumerates the size (in bytes) required by
+        *    the XSAVE instruction for an XSAVE area
+        *    containing all the *user* state components
+        *    corresponding to bits currently set in XCR0.
+        */
+       cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
+       return ebx;
 }
 
 /*
@@ -591,7 +648,15 @@ static bool is_supported_xstate_size(unsigned int test_xstate_size)
 static int init_xstate_size(void)
 {
        /* Recompute the context size for enabled features: */
-       unsigned int possible_xstate_size = calculate_xstate_size();
+       unsigned int possible_xstate_size;
+       unsigned int xsave_size;
+
+       xsave_size = get_xsave_size();
+
+       if (boot_cpu_has(X86_FEATURE_XSAVES))
+               possible_xstate_size = get_xsaves_size();
+       else
+               possible_xstate_size = xsave_size;
 
        /* Ensure we have the space to store all enabled: */
        if (!is_supported_xstate_size(possible_xstate_size))
@@ -601,8 +666,13 @@ static int init_xstate_size(void)
         * The size is OK, we are definitely going to use xsave,
         * make it known to the world that we need more space.
         */
-       xstate_size = possible_xstate_size;
+       fpu_kernel_xstate_size = possible_xstate_size;
        do_extra_xstate_size_checks();
+
+       /*
+        * User space is always in standard format.
+        */
+       fpu_user_xstate_size = xsave_size;
        return 0;
 }
 
@@ -644,8 +714,13 @@ void __init fpu__init_system_xstate(void)
        xfeatures_mask = eax + ((u64)edx << 32);
 
        if ((xfeatures_mask & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
+               /*
+                * This indicates that something really unexpected happened
+                * with the enumeration.  Disable XSAVE and try to continue
+                * booting without it.  This is too early to BUG().
+                */
                pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
-               BUG();
+               goto out_disable;
        }
 
        xfeatures_mask &= fpu__get_supported_xfeatures_mask();
@@ -653,21 +728,29 @@ void __init fpu__init_system_xstate(void)
        /* Enable xstate instructions to be able to continue with initialization: */
        fpu__init_cpu_xstate();
        err = init_xstate_size();
-       if (err) {
-               /* something went wrong, boot without any XSAVE support */
-               fpu__init_disable_system_xstate();
-               return;
-       }
+       if (err)
+               goto out_disable;
+
+       /*
+        * Update info used for ptrace frames; use standard-format size and no
+        * supervisor xstates:
+        */
+       update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR);
 
-       update_regset_xstate_info(xstate_size, xfeatures_mask);
        fpu__init_prepare_fx_sw_frame();
        setup_init_fpu_buf();
        setup_xstate_comp();
+       print_xstate_offset_size();
 
        pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
                xfeatures_mask,
-               xstate_size,
+               fpu_kernel_xstate_size,
                boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
+       return;
+
+out_disable:
+       /* something went wrong, try to boot without any XSAVE support */
+       fpu__init_disable_system_xstate();
 }
 
 /*
@@ -693,6 +776,11 @@ void *__raw_xsave_addr(struct xregs_state *xsave, int xstate_feature_mask)
 {
        int feature_nr = fls64(xstate_feature_mask) - 1;
 
+       if (!xfeature_enabled(feature_nr)) {
+               WARN_ON_FPU(1);
+               return NULL;
+       }
+
        return (void *)xsave + xstate_comp_offsets[feature_nr];
 }
 /*
@@ -887,16 +975,16 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
        if (!boot_cpu_has(X86_FEATURE_OSPKE))
                return -EINVAL;
 
-       /* Set the bits we need in PKRU  */
+       /* Set the bits we need in PKRU:  */
        if (init_val & PKEY_DISABLE_ACCESS)
                new_pkru_bits |= PKRU_AD_BIT;
        if (init_val & PKEY_DISABLE_WRITE)
                new_pkru_bits |= PKRU_WD_BIT;
 
-       /* Shift the bits in to the correct place in PKRU for pkey. */
+       /* Shift the bits in to the correct place in PKRU for pkey: */
        new_pkru_bits <<= pkey_shift;
 
-       /* Locate old copy of the state in the xsave buffer */
+       /* Locate old copy of the state in the xsave buffer: */
        old_pkru_state = get_xsave_addr(xsave, XFEATURE_MASK_PKRU);
 
        /*
@@ -909,9 +997,10 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
        else
                new_pkru_state.pkru = old_pkru_state->pkru;
 
-       /* mask off any old bits in place */
+       /* Mask off any old bits in place: */
        new_pkru_state.pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);
-       /* Set the newly-requested bits */
+
+       /* Set the newly-requested bits: */
        new_pkru_state.pkru |= new_pkru_bits;
 
        /*
@@ -925,8 +1014,168 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
         */
        new_pkru_state.pad = 0;
 
-       fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state,
-                       sizeof(new_pkru_state));
+       fpu__xfeature_set_state(XFEATURE_MASK_PKRU, &new_pkru_state, sizeof(new_pkru_state));
+
+       return 0;
+}
+
+/*
+ * This is similar to user_regset_copyout(), but will not add offset to
+ * the source data pointer or increment pos, count, kbuf, and ubuf.
+ */
+static inline int xstate_copyout(unsigned int pos, unsigned int count,
+                                void *kbuf, void __user *ubuf,
+                                const void *data, const int start_pos,
+                                const int end_pos)
+{
+       if ((count == 0) || (pos < start_pos))
+               return 0;
+
+       if (end_pos < 0 || pos < end_pos) {
+               unsigned int copy = (end_pos < 0 ? count : min(count, end_pos - pos));
+
+               if (kbuf) {
+                       memcpy(kbuf + pos, data, copy);
+               } else {
+                       if (__copy_to_user(ubuf + pos, data, copy))
+                               return -EFAULT;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Convert from kernel XSAVES compacted format to standard format and copy
+ * to a ptrace buffer. It supports partial copy but pos always starts from
+ * zero. This is called from xstateregs_get() and there we check the CPU
+ * has XSAVES.
+ */
+int copyout_from_xsaves(unsigned int pos, unsigned int count, void *kbuf,
+                       void __user *ubuf, struct xregs_state *xsave)
+{
+       unsigned int offset, size;
+       int ret, i;
+       struct xstate_header header;
+
+       /*
+        * Currently copy_regset_to_user() starts from pos 0:
+        */
+       if (unlikely(pos != 0))
+               return -EFAULT;
+
+       /*
+        * The destination is a ptrace buffer; we put in only user xstates:
+        */
+       memset(&header, 0, sizeof(header));
+       header.xfeatures = xsave->header.xfeatures;
+       header.xfeatures &= ~XFEATURE_MASK_SUPERVISOR;
+
+       /*
+        * Copy xregs_state->header:
+        */
+       offset = offsetof(struct xregs_state, header);
+       size = sizeof(header);
+
+       ret = xstate_copyout(offset, size, kbuf, ubuf, &header, 0, count);
+
+       if (ret)
+               return ret;
+
+       for (i = 0; i < XFEATURE_MAX; i++) {
+               /*
+                * Copy only in-use xstates:
+                */
+               if ((header.xfeatures >> i) & 1) {
+                       void *src = __raw_xsave_addr(xsave, 1 << i);
+
+                       offset = xstate_offsets[i];
+                       size = xstate_sizes[i];
+
+                       ret = xstate_copyout(offset, size, kbuf, ubuf, src, 0, count);
+
+                       if (ret)
+                               return ret;
+
+                       if (offset + size >= count)
+                               break;
+               }
+
+       }
+
+       /*
+        * Fill xsave->i387.sw_reserved value for ptrace frame:
+        */
+       offset = offsetof(struct fxregs_state, sw_reserved);
+       size = sizeof(xstate_fx_sw_bytes);
+
+       ret = xstate_copyout(offset, size, kbuf, ubuf, xstate_fx_sw_bytes, 0, count);
+
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * Convert from a ptrace standard-format buffer to kernel XSAVES format
+ * and copy to the target thread. This is called from xstateregs_set() and
+ * there we check the CPU has XSAVES and a whole standard-sized buffer
+ * exists.
+ */
+int copyin_to_xsaves(const void *kbuf, const void __user *ubuf,
+                    struct xregs_state *xsave)
+{
+       unsigned int offset, size;
+       int i;
+       u64 xfeatures;
+       u64 allowed_features;
+
+       offset = offsetof(struct xregs_state, header);
+       size = sizeof(xfeatures);
+
+       if (kbuf) {
+               memcpy(&xfeatures, kbuf + offset, size);
+       } else {
+               if (__copy_from_user(&xfeatures, ubuf + offset, size))
+                       return -EFAULT;
+       }
+
+       /*
+        * Reject if the user sets any disabled or supervisor features:
+        */
+       allowed_features = xfeatures_mask & ~XFEATURE_MASK_SUPERVISOR;
+
+       if (xfeatures & ~allowed_features)
+               return -EINVAL;
+
+       for (i = 0; i < XFEATURE_MAX; i++) {
+               u64 mask = ((u64)1 << i);
+
+               if (xfeatures & mask) {
+                       void *dst = __raw_xsave_addr(xsave, 1 << i);
+
+                       offset = xstate_offsets[i];
+                       size = xstate_sizes[i];
+
+                       if (kbuf) {
+                               memcpy(dst, kbuf + offset, size);
+                       } else {
+                               if (__copy_from_user(dst, ubuf + offset, size))
+                                       return -EFAULT;
+                       }
+               }
+       }
+
+       /*
+        * The state that came in from userspace was user-state only.
+        * Mask all the user states out of 'xfeatures':
+        */
+       xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR;
+
+       /*
+        * Add back in the features that came in from userspace:
+        */
+       xsave->header.xfeatures |= xfeatures;
 
        return 0;
 }
index d784bb547a9dd64f64ab0b9c429d7d0bff20df9d..2dda0bc4576ebf7a6940056e621d0bdb4850aa79 100644 (file)
@@ -26,7 +26,7 @@ static void __init i386_default_early_setup(void)
        x86_init.resources.reserve_resources = i386_reserve_resources;
        x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
 
-       reserve_ebda_region();
+       reserve_bios_regions();
 }
 
 asmlinkage __visible void __init i386_start_kernel(void)
index b72fb0b71dd1f84c7e9e8141281a82e758be9909..99d48e7d2974c64fe7b3fe9c1dd59e7f3b189cbb 100644 (file)
@@ -183,7 +183,7 @@ void __init x86_64_start_reservations(char *real_mode_data)
                copy_bootdata(__va(real_mode_data));
 
        x86_early_init_platform_quirks();
-       reserve_ebda_region();
+       reserve_bios_regions();
 
        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_INTEL_MID:
index 5df831ef1442f36c5ee0ac00447a2505216d1a26..9f8efc9f00756a4e19c4b12e834483b6cf7150c4 100644 (file)
@@ -38,7 +38,7 @@
 
 #define pud_index(x)   (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 
-L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
+L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
 
@@ -299,6 +299,7 @@ ENTRY(secondary_startup_64)
        pushq   $__KERNEL_CS    # set correct cs
        pushq   %rax            # target address in negative space
        lretq
+ENDPROC(secondary_startup_64)
 
 #include "verify_cpu.S"
 
index 64341aa485ae1ad6ab62c07984c9a70dadd44c64..d40ee8a38fed3568d39410d75101111251ace73a 100644 (file)
@@ -42,3 +42,5 @@ EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(___preempt_schedule);
 EXPORT_SYMBOL(___preempt_schedule_notrace);
 #endif
+
+EXPORT_SYMBOL(__sw_hweight32);
index eea2a6f72b31c089d1b100eaefff32d1c6be4a87..1ef5e48b3a3638504ad42d1b1e7e75c959f06768 100644 (file)
@@ -301,8 +301,6 @@ static void kvm_register_steal_time(void)
        if (!has_steal_clock)
                return;
 
-       memset(st, 0, sizeof(*st));
-
        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
index b2f8a33b36ff4e6fb3beef130af93717f10baf11..24a50301f1504a3560066e25ea9fa0ddc0d8fdaa 100644 (file)
@@ -7,12 +7,12 @@
 void __init x86_early_init_platform_quirks(void)
 {
        x86_platform.legacy.rtc = 1;
-       x86_platform.legacy.ebda_search = 0;
+       x86_platform.legacy.reserve_bios_regions = 0;
        x86_platform.legacy.devices.pnpbios = 1;
 
        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_PC:
-               x86_platform.legacy.ebda_search = 1;
+               x86_platform.legacy.reserve_bios_regions = 1;
                break;
        case X86_SUBARCH_XEN:
        case X86_SUBARCH_LGUEST:
index a9b31eb815f23e93eae56960879448d7a63fba09..15ed70f8278b1b8f13e5e7852edb3987441c60b2 100644 (file)
@@ -54,6 +54,19 @@ bool port_cf9_safe = false;
  * Dell Inc. so their systems "just work". :-)
  */
 
+/*
+ * Some machines require the "reboot=a" commandline option
+ */
+static int __init set_acpi_reboot(const struct dmi_system_id *d)
+{
+       if (reboot_type != BOOT_ACPI) {
+               reboot_type = BOOT_ACPI;
+               pr_info("%s series board detected. Selecting %s-method for reboots.\n",
+                       d->ident, "ACPI");
+       }
+       return 0;
+}
+
 /*
  * Some machines require the "reboot=b" or "reboot=k"  commandline options,
  * this quirk makes that automatic.
@@ -395,6 +408,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"),
                },
        },
+       {       /* Handle problems with rebooting on Dell Optiplex 7450 AIO */
+               .callback = set_acpi_reboot,
+               .ident = "Dell OptiPlex 7450 AIO",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7450 AIO"),
+               },
+       },
 
        /* Hewlett-Packard */
        {       /* Handle problems with rebooting on HP laptops */
index c4e7b3991b60d4f9dc416e925c996e870969a6c2..a2616584b6e99e3c70a9187d72914663345432d6 100644 (file)
 #include <asm/prom.h>
 #include <asm/microcode.h>
 #include <asm/mmu_context.h>
+#include <asm/kaslr.h>
 
 /*
  * max_low_pfn_mapped: highest direct mapped pfn under 4GB
@@ -942,6 +943,8 @@ void __init setup_arch(char **cmdline_p)
 
        x86_init.oem.arch_setup();
 
+       kernel_randomize_memory();
+
        iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
        setup_memory_map();
        parse_setup_data();
index dc3c0b1c816fabb7112426754c3e4bbc86df986a..b44564bf86a836f66808af13947cea34bd1a9717 100644 (file)
 #include <linux/compat.h>
 #include <linux/uaccess.h>
 
+/*
+ * The compat_siginfo_t structure and handling code is very easy
+ * to break in several ways.  It must always be updated when new
+ * updates are made to the main siginfo_t, and
+ * copy_siginfo_to_user32() must be updated when the
+ * (arch-independent) copy_siginfo_to_user() is updated.
+ *
+ * It is also easy to put a new member in the compat_siginfo_t
+ * which has implicit alignment which can move internal structure
+ * alignment around breaking the ABI.  This can happen if you,
+ * for instance, put a plain 64-bit value in there.
+ */
+static inline void signal_compat_build_tests(void)
+{
+       int _sifields_offset = offsetof(compat_siginfo_t, _sifields);
+
+       /*
+        * If adding a new si_code, there is probably new data in
+        * the siginfo.  Make sure folks bumping the si_code
+        * limits also have to look at this code.  Make sure any
+        * new fields are handled in copy_siginfo_to_user32()!
+        */
+       BUILD_BUG_ON(NSIGILL  != 8);
+       BUILD_BUG_ON(NSIGFPE  != 8);
+       BUILD_BUG_ON(NSIGSEGV != 4);
+       BUILD_BUG_ON(NSIGBUS  != 5);
+       BUILD_BUG_ON(NSIGTRAP != 4);
+       BUILD_BUG_ON(NSIGCHLD != 6);
+       BUILD_BUG_ON(NSIGSYS  != 1);
+
+       /* This is part of the ABI and can never change in size: */
+       BUILD_BUG_ON(sizeof(compat_siginfo_t) != 128);
+       /*
+        * The offsets of all the (unioned) si_fields are fixed
+        * in the ABI, of course.  Make sure none of them ever
+        * move and are always at the beginning:
+        */
+       BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
+#define CHECK_CSI_OFFSET(name)   BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))
+
+        /*
+        * Ensure that the size of each si_field never changes.
+        * If it does, it is a sign that the
+        * copy_siginfo_to_user32() code below needs to be updated
+        * along with the size in the CHECK_SI_SIZE().
+        *
+        * We repeat this check for both the generic and compat
+        * siginfos.
+        *
+        * Note: it is OK for these to grow as long as the whole
+        * structure stays within the padding size (checked
+        * above).
+        */
+#define CHECK_CSI_SIZE(name, size) BUILD_BUG_ON(size != sizeof(((compat_siginfo_t *)0)->_sifields.name))
+#define CHECK_SI_SIZE(name, size) BUILD_BUG_ON(size != sizeof(((siginfo_t *)0)->_sifields.name))
+
+       CHECK_CSI_OFFSET(_kill);
+       CHECK_CSI_SIZE  (_kill, 2*sizeof(int));
+       CHECK_SI_SIZE   (_kill, 2*sizeof(int));
+
+       CHECK_CSI_OFFSET(_timer);
+       CHECK_CSI_SIZE  (_timer, 5*sizeof(int));
+       CHECK_SI_SIZE   (_timer, 6*sizeof(int));
+
+       CHECK_CSI_OFFSET(_rt);
+       CHECK_CSI_SIZE  (_rt, 3*sizeof(int));
+       CHECK_SI_SIZE   (_rt, 4*sizeof(int));
+
+       CHECK_CSI_OFFSET(_sigchld);
+       CHECK_CSI_SIZE  (_sigchld, 5*sizeof(int));
+       CHECK_SI_SIZE   (_sigchld, 8*sizeof(int));
+
+       CHECK_CSI_OFFSET(_sigchld_x32);
+       CHECK_CSI_SIZE  (_sigchld_x32, 7*sizeof(int));
+       /* no _sigchld_x32 in the generic siginfo_t */
+
+       CHECK_CSI_OFFSET(_sigfault);
+       CHECK_CSI_SIZE  (_sigfault, 4*sizeof(int));
+       CHECK_SI_SIZE   (_sigfault, 8*sizeof(int));
+
+       CHECK_CSI_OFFSET(_sigpoll);
+       CHECK_CSI_SIZE  (_sigpoll, 2*sizeof(int));
+       CHECK_SI_SIZE   (_sigpoll, 4*sizeof(int));
+
+       CHECK_CSI_OFFSET(_sigsys);
+       CHECK_CSI_SIZE  (_sigsys, 3*sizeof(int));
+       CHECK_SI_SIZE   (_sigsys, 4*sizeof(int));
+
+       /* any new si_fields should be added here */
+}
+
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
 {
        int err = 0;
        bool ia32 = test_thread_flag(TIF_IA32);
 
+       signal_compat_build_tests();
+
        if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
                return -EFAULT;
 
@@ -32,6 +125,21 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
                                          &to->_sifields._pad[0]);
                        switch (from->si_code >> 16) {
                        case __SI_FAULT >> 16:
+                               if (from->si_signo == SIGBUS &&
+                                   (from->si_code == BUS_MCEERR_AR ||
+                                    from->si_code == BUS_MCEERR_AO))
+                                       put_user_ex(from->si_addr_lsb, &to->si_addr_lsb);
+
+                               if (from->si_signo == SIGSEGV) {
+                                       if (from->si_code == SEGV_BNDERR) {
+                                               compat_uptr_t lower = (unsigned long)&to->si_lower;
+                                               compat_uptr_t upper = (unsigned long)&to->si_upper;
+                                               put_user_ex(lower, &to->si_lower);
+                                               put_user_ex(upper, &to->si_upper);
+                                       }
+                                       if (from->si_code == SEGV_PKUERR)
+                                               put_user_ex(from->si_pkey, &to->si_pkey);
+                               }
                                break;
                        case __SI_SYS >> 16:
                                put_user_ex(from->si_syscall, &to->si_syscall);
index fafe8b923cac2d27da4189e6be9a2761bfcd6d01..d0a51939c15040e859d88752aae918f651b22c99 100644 (file)
@@ -105,6 +105,9 @@ static unsigned int max_physical_pkg_id __read_mostly;
 unsigned int __max_logical_packages __read_mostly;
 EXPORT_SYMBOL(__max_logical_packages);
 
+/* Maximum number of SMT threads on any online core */
+int __max_smt_threads __read_mostly;
+
 static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 {
        unsigned long flags;
@@ -493,7 +496,7 @@ void set_cpu_sibling_map(int cpu)
        bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct cpuinfo_x86 *o;
-       int i;
+       int i, threads;
 
        cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
@@ -550,6 +553,10 @@ void set_cpu_sibling_map(int cpu)
                if (match_die(c, o) && !topology_same_node(c, o))
                        primarily_use_numa_for_topology();
        }
+
+       threads = cpumask_weight(topology_sibling_cpumask(cpu));
+       if (threads > __max_smt_threads)
+               __max_smt_threads = threads;
 }
 
 /* maps the cpu to the sched domain representing multi-core */
@@ -1285,7 +1292,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        cpumask_copy(cpu_callin_mask, cpumask_of(0));
        mb();
 
-       current_thread_info()->cpu = 0;  /* needed? */
        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
@@ -1441,6 +1447,21 @@ __init void prefill_possible_map(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
+/* Recompute SMT state for all CPUs on offline */
+static void recompute_smt_state(void)
+{
+       int max_threads, cpu;
+
+       max_threads = 0;
+       for_each_online_cpu (cpu) {
+               int threads = cpumask_weight(topology_sibling_cpumask(cpu));
+
+               if (threads > max_threads)
+                       max_threads = threads;
+       }
+       __max_smt_threads = max_threads;
+}
+
 static void remove_siblinginfo(int cpu)
 {
        int sibling;
@@ -1465,6 +1486,7 @@ static void remove_siblinginfo(int cpu)
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
+       recompute_smt_state();
 }
 
 static void remove_cpu_from_maps(int cpu)
index 38ba6de56edec93badec52707045b0db3a189e0a..a804b5ab32d0c841edddc29ebd4ea966ab5c3c01 100644 (file)
@@ -239,7 +239,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
        return ns;
 }
 
-static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+static void set_cyc2ns_scale(unsigned long khz, int cpu)
 {
        unsigned long long tsc_now, ns_now;
        struct cyc2ns_data *data;
@@ -248,7 +248,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
        local_irq_save(flags);
        sched_clock_idle_sleep_event();
 
-       if (!cpu_khz)
+       if (!khz)
                goto done;
 
        data = cyc2ns_write_begin(cpu);
@@ -261,7 +261,7 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
         * time function is continuous; see the comment near struct
         * cyc2ns_data.
         */
-       clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, cpu_khz,
+       clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
                               NSEC_PER_MSEC, 0);
 
        /*
@@ -335,12 +335,6 @@ int check_tsc_unstable(void)
 }
 EXPORT_SYMBOL_GPL(check_tsc_unstable);
 
-int check_tsc_disabled(void)
-{
-       return tsc_disabled;
-}
-EXPORT_SYMBOL_GPL(check_tsc_disabled);
-
 #ifdef CONFIG_X86_TSC
 int __init notsc_setup(char *str)
 {
@@ -665,19 +659,77 @@ success:
 }
 
 /**
- * native_calibrate_tsc - calibrate the tsc on boot
+ * native_calibrate_tsc
+ * Determine TSC frequency via CPUID, else return 0.
  */
 unsigned long native_calibrate_tsc(void)
+{
+       unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;
+       unsigned int crystal_khz;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return 0;
+
+       if (boot_cpu_data.cpuid_level < 0x15)
+               return 0;
+
+       eax_denominator = ebx_numerator = ecx_hz = edx = 0;
+
+       /* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
+       cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);
+
+       if (ebx_numerator == 0 || eax_denominator == 0)
+               return 0;
+
+       crystal_khz = ecx_hz / 1000;
+
+       if (crystal_khz == 0) {
+               switch (boot_cpu_data.x86_model) {
+               case 0x4E:      /* SKL */
+               case 0x5E:      /* SKL */
+                       crystal_khz = 24000;    /* 24.0 MHz */
+                       break;
+               case 0x5C:      /* BXT */
+                       crystal_khz = 19200;    /* 19.2 MHz */
+                       break;
+               }
+       }
+
+       return crystal_khz * ebx_numerator / eax_denominator;
+}
+
+static unsigned long cpu_khz_from_cpuid(void)
+{
+       unsigned int eax_base_mhz, ebx_max_mhz, ecx_bus_mhz, edx;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return 0;
+
+       if (boot_cpu_data.cpuid_level < 0x16)
+               return 0;
+
+       eax_base_mhz = ebx_max_mhz = ecx_bus_mhz = edx = 0;
+
+       cpuid(0x16, &eax_base_mhz, &ebx_max_mhz, &ecx_bus_mhz, &edx);
+
+       return eax_base_mhz * 1000;
+}
+
+/**
+ * native_calibrate_cpu - calibrate the cpu on boot
+ */
+unsigned long native_calibrate_cpu(void)
 {
        u64 tsc1, tsc2, delta, ref1, ref2;
        unsigned long tsc_pit_min = ULONG_MAX, tsc_ref_min = ULONG_MAX;
        unsigned long flags, latch, ms, fast_calibrate;
        int hpet = is_hpet_enabled(), i, loopmin;
 
-       /* Calibrate TSC using MSR for Intel Atom SoCs */
-       local_irq_save(flags);
-       fast_calibrate = try_msr_calibrate_tsc();
-       local_irq_restore(flags);
+       fast_calibrate = cpu_khz_from_cpuid();
+       if (fast_calibrate)
+               return fast_calibrate;
+
+       fast_calibrate = cpu_khz_from_msr();
        if (fast_calibrate)
                return fast_calibrate;
 
@@ -837,8 +889,12 @@ int recalibrate_cpu_khz(void)
        if (!boot_cpu_has(X86_FEATURE_TSC))
                return -ENODEV;
 
+       cpu_khz = x86_platform.calibrate_cpu();
        tsc_khz = x86_platform.calibrate_tsc();
-       cpu_khz = tsc_khz;
+       if (tsc_khz == 0)
+               tsc_khz = cpu_khz;
+       else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
+               cpu_khz = tsc_khz;
        cpu_data(0).loops_per_jiffy = cpufreq_scale(cpu_data(0).loops_per_jiffy,
                                                    cpu_khz_old, cpu_khz);
 
@@ -1244,8 +1300,18 @@ void __init tsc_init(void)
                return;
        }
 
+       cpu_khz = x86_platform.calibrate_cpu();
        tsc_khz = x86_platform.calibrate_tsc();
-       cpu_khz = tsc_khz;
+
+       /*
+        * Trust non-zero tsc_khz as authoritative,
+        * and use it to sanity check cpu_khz,
+        * which will be off if system timer is off.
+        */
+       if (tsc_khz == 0)
+               tsc_khz = cpu_khz;
+       else if (abs(cpu_khz - tsc_khz) * 10 > tsc_khz)
+               cpu_khz = tsc_khz;
 
        if (!tsc_khz) {
                mark_tsc_unstable("could not calculate TSC khz");
@@ -1265,7 +1331,7 @@ void __init tsc_init(void)
         */
        for_each_possible_cpu(cpu) {
                cyc2ns_init(cpu);
-               set_cyc2ns_scale(cpu_khz, cpu);
+               set_cyc2ns_scale(tsc_khz, cpu);
        }
 
        if (tsc_disabled > 0)
index 9911a0620f9a94c1b6e1aa02290649d7c2803600..0fe720d64feff2c7027280f741495f6a9cf4722c 100644 (file)
@@ -1,14 +1,5 @@
 /*
- * tsc_msr.c - MSR based TSC calibration on Intel Atom SoC platforms.
- *
- * TSC in Intel Atom SoC runs at a constant rate which can be figured
- * by this formula:
- * <maximum core-clock to bus-clock ratio> * <maximum resolved frequency>
- * See Intel 64 and IA-32 System Programming Guid section 16.12 and 30.11.5
- * for details.
- * Especially some Intel Atom SoCs don't have PIT(i8254) or HPET, so MSR
- * based calibration is the only option.
- *
+ * tsc_msr.c - TSC frequency enumeration via MSR
  *
  * Copyright (C) 2013 Intel Corporation
  * Author: Bin Gao <bin.gao@intel.com>
 #include <asm/apic.h>
 #include <asm/param.h>
 
-/* CPU reference clock frequency: in KHz */
-#define FREQ_80                80000
-#define FREQ_83                83200
-#define FREQ_100       99840
-#define FREQ_133       133200
-#define FREQ_166       166400
-
-#define MAX_NUM_FREQS  8
+#define MAX_NUM_FREQS  9
 
 /*
- * According to Intel 64 and IA-32 System Programming Guide,
- * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
+ * If MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
  * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40].
  * Unfortunately some Intel Atom SoCs aren't quite compliant to this,
  * so we need manually differentiate SoC families. This is what the
@@ -48,17 +31,18 @@ struct freq_desc {
 
 static struct freq_desc freq_desc_tables[] = {
        /* PNW */
-       { 6, 0x27, 0, { 0, 0, 0, 0, 0, FREQ_100, 0, FREQ_83 } },
+       { 6, 0x27, 0, { 0, 0, 0, 0, 0, 99840, 0, 83200 } },
        /* CLV+ */
-       { 6, 0x35, 0, { 0, FREQ_133, 0, 0, 0, FREQ_100, 0, FREQ_83 } },
-       /* TNG */
-       { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } },
-       /* VLV2 */
-       { 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
-       /* ANN */
-       { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
-       /* AIRMONT */
-       { 6, 0x4c, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, FREQ_80, 0, 0, 0 } },
+       { 6, 0x35, 0, { 0, 133200, 0, 0, 0, 99840, 0, 83200 } },
+       /* TNG - Intel Atom processor Z3400 series */
+       { 6, 0x4a, 1, { 0, 100000, 133300, 0, 0, 0, 0, 0 } },
+       /* VLV2 - Intel Atom processor E3000, Z3600, Z3700 series */
+       { 6, 0x37, 1, { 83300, 100000, 133300, 116700, 80000, 0, 0, 0 } },
+       /* ANN - Intel Atom processor Z3500 series */
+       { 6, 0x5a, 1, { 83300, 100000, 133300, 100000, 0, 0, 0, 0 } },
+       /* AMT - Intel Atom processor X7-Z8000 and X5-Z8000 series */
+       { 6, 0x4c, 1, { 83300, 100000, 133300, 116700,
+                       80000, 93300, 90000, 88900, 87500 } },
 };
 
 static int match_cpu(u8 family, u8 model)
@@ -79,16 +63,20 @@ static int match_cpu(u8 family, u8 model)
        (freq_desc_tables[cpu_index].freqs[freq_id])
 
 /*
- * Do MSR calibration only for known/supported CPUs.
+ * MSR-based CPU/TSC frequency discovery for certain CPUs.
  *
- * Returns the calibration value or 0 if MSR calibration failed.
+ * Set global "lapic_timer_frequency" to bus_clock_cycles/jiffy
+ * Return processor base frequency in KHz, or 0 on failure.
  */
-unsigned long try_msr_calibrate_tsc(void)
+unsigned long cpu_khz_from_msr(void)
 {
        u32 lo, hi, ratio, freq_id, freq;
        unsigned long res;
        int cpu_index;
 
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return 0;
+
        cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
        if (cpu_index < 0)
                return 0;
@@ -100,31 +88,17 @@ unsigned long try_msr_calibrate_tsc(void)
                rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
                ratio = (hi >> 8) & 0x1f;
        }
-       pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);
-
-       if (!ratio)
-               goto fail;
 
        /* Get FSB FREQ ID */
        rdmsr(MSR_FSB_FREQ, lo, hi);
        freq_id = lo & 0x7;
        freq = id_to_freq(cpu_index, freq_id);
-       pr_info("Resolved frequency ID: %u, frequency: %u KHz\n",
-                               freq_id, freq);
-       if (!freq)
-               goto fail;
 
        /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
        res = freq * ratio;
-       pr_info("TSC runs at %lu KHz\n", res);
 
 #ifdef CONFIG_X86_LOCAL_APIC
        lapic_timer_frequency = (freq * 1000) / HZ;
-       pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency);
 #endif
        return res;
-
-fail:
-       pr_warn("Fast TSC calibration using MSR failed\n");
-       return 0;
 }
index 3dce1ca0a653091967f7089ce9e89c9d54399408..01f30e56f99e57c2e215b2925ffcc205ea651ee3 100644 (file)
@@ -440,10 +440,7 @@ static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
 
 static inline int is_revectored(int nr, struct revectored_struct *bitmap)
 {
-       __asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
-               :"=r" (nr)
-               :"m" (*bitmap), "r" (nr));
-       return nr;
+       return test_bit(nr, bitmap->__map);
 }
 
 #define val_byte(val, n) (((__u8 *)&val)[n])
index cd05942bc9189452d8ec7c0cebd96431cf8dd394..f1aebfb49c36688b9287a9f444a223750c3170b8 100644 (file)
@@ -44,6 +44,9 @@ EXPORT_SYMBOL(clear_page);
 
 EXPORT_SYMBOL(csum_partial);
 
+EXPORT_SYMBOL(__sw_hweight32);
+EXPORT_SYMBOL(__sw_hweight64);
+
 /*
  * Export string functions. We normally rely on gcc builtin for most of these,
  * but gcc sometimes decides not to inline them.
index dad5fe9633a37e03215892c60386a779745fdb61..58b459296e13d007655e3ba8854d11832a535154 100644 (file)
@@ -92,6 +92,7 @@ static void default_nmi_init(void) { };
 static int default_i8042_detect(void) { return 1; };
 
 struct x86_platform_ops x86_platform = {
+       .calibrate_cpu                  = native_calibrate_cpu,
        .calibrate_tsc                  = native_calibrate_tsc,
        .get_wallclock                  = mach_get_cmos_time,
        .set_wallclock                  = mach_set_rtc_mmss,
index 7da5dd2057a928fd3eebed2052a6d89ee0750924..b2766723c951e967a992a9730c6b283151d41076 100644 (file)
@@ -55,9 +55,6 @@
 #include <linux/irqbypass.h>
 #include <trace/events/kvm.h>
 
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
 #include <asm/debugreg.h>
 #include <asm/msr.h>
 #include <asm/desc.h>
@@ -68,6 +65,9 @@
 #include <asm/div64.h>
 #include <asm/irq_remapping.h>
 
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
 #define MAX_IO_MSRS 256
 #define KVM_MAX_MCE_BANKS 32
 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
index 3847e736702e1153cc7e654087ca716d6b55d7da..25da5bc8d83dc2ac8c84e8de6084acea77eb1ad3 100644 (file)
@@ -1233,8 +1233,6 @@ static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val)
 static void probe_pci_console(void)
 {
        u8 cap, common_cap = 0, device_cap = 0;
-       /* Offset within BAR0 */
-       u32 device_offset;
        u32 device_len;
 
        /* Avoid recursive printk into here. */
@@ -1258,24 +1256,16 @@ static void probe_pci_console(void)
                u8 vndr = read_pci_config_byte(0, 1, 0, cap);
                if (vndr == PCI_CAP_ID_VNDR) {
                        u8 type, bar;
-                       u32 offset, length;
 
                        type = read_pci_config_byte(0, 1, 0,
                            cap + offsetof(struct virtio_pci_cap, cfg_type));
                        bar = read_pci_config_byte(0, 1, 0,
                            cap + offsetof(struct virtio_pci_cap, bar));
-                       offset = read_pci_config(0, 1, 0,
-                           cap + offsetof(struct virtio_pci_cap, offset));
-                       length = read_pci_config(0, 1, 0,
-                           cap + offsetof(struct virtio_pci_cap, length));
 
                        switch (type) {
                        case VIRTIO_PCI_CAP_DEVICE_CFG:
-                               if (bar == 0) {
+                               if (bar == 0)
                                        device_cap = cap;
-                                       device_offset = offset;
-                                       device_len = length;
-                               }
                                break;
                        case VIRTIO_PCI_CAP_PCI_CFG:
                                console_access_cap = cap;
@@ -1297,13 +1287,16 @@ static void probe_pci_console(void)
         * emerg_wr.  If it doesn't support VIRTIO_CONSOLE_F_EMERG_WRITE
         * it should ignore the access.
         */
+       device_len = read_pci_config(0, 1, 0,
+                       device_cap + offsetof(struct virtio_pci_cap, length));
        if (device_len < (offsetof(struct virtio_console_config, emerg_wr)
                          + sizeof(u32))) {
                printk(KERN_ERR "lguest: console missing emerg_wr field\n");
                return;
        }
 
-       console_cfg_offset = device_offset;
+       console_cfg_offset = read_pci_config(0, 1, 0,
+                       device_cap + offsetof(struct virtio_pci_cap, offset));
        printk(KERN_INFO "lguest: Console via virtio-pci emerg_wr\n");
 }
 
index 72a576752a7ec062f92e433362306e7a6b4648de..34a74131a12c58ef9f38712261900093b53220f1 100644 (file)
@@ -24,8 +24,9 @@ lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
+lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 
-obj-y += msr.o msr-reg.o msr-reg-export.o
+obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
 
 ifeq ($(CONFIG_X86_32),y)
         obj-y += atomic64_32.o
index 2b0ef26da0bde8eabc38f5b42ef9e4aa02464ad4..bf603ebbfd8e26eb81f915f52103a2e9aa691cd9 100644 (file)
 
 /* Standard copy_to_user with segment limit checking */
 ENTRY(_copy_to_user)
-       GET_THREAD_INFO(%rax)
+       mov PER_CPU_VAR(current_task), %rax
        movq %rdi,%rcx
        addq %rdx,%rcx
        jc bad_to_user
-       cmpq TI_addr_limit(%rax),%rcx
+       cmpq TASK_addr_limit(%rax),%rcx
        ja bad_to_user
        ALTERNATIVE_2 "jmp copy_user_generic_unrolled",         \
                      "jmp copy_user_generic_string",           \
@@ -32,11 +32,11 @@ ENDPROC(_copy_to_user)
 
 /* Standard copy_from_user with segment limit checking */
 ENTRY(_copy_from_user)
-       GET_THREAD_INFO(%rax)
+       mov PER_CPU_VAR(current_task), %rax
        movq %rsi,%rcx
        addq %rdx,%rcx
        jc bad_from_user
-       cmpq TI_addr_limit(%rax),%rcx
+       cmpq TASK_addr_limit(%rax),%rcx
        ja bad_from_user
        ALTERNATIVE_2 "jmp copy_user_generic_unrolled",         \
                      "jmp copy_user_generic_string",           \
index 28a6654f0d08e34bb11d81747305fa8fbe67f4fe..b6fcb9a9ddbc00845d3b6f4244e812f69786b429 100644 (file)
@@ -6,6 +6,7 @@
  */
 #include <asm/checksum.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <asm/smap.h>
 
 /**
index 46668cda4ffdfd5af38abe5ca973ca15aa19d8d8..0ef5128c2de8b05ce60e8beac1e43f53adcba2ba 100644 (file)
@@ -35,8 +35,8 @@
 
        .text
 ENTRY(__get_user_1)
-       GET_THREAD_INFO(%_ASM_DX)
-       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user
        ASM_STAC
 1:     movzbl (%_ASM_AX),%edx
@@ -48,8 +48,8 @@ ENDPROC(__get_user_1)
 ENTRY(__get_user_2)
        add $1,%_ASM_AX
        jc bad_get_user
-       GET_THREAD_INFO(%_ASM_DX)
-       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user
        ASM_STAC
 2:     movzwl -1(%_ASM_AX),%edx
@@ -61,8 +61,8 @@ ENDPROC(__get_user_2)
 ENTRY(__get_user_4)
        add $3,%_ASM_AX
        jc bad_get_user
-       GET_THREAD_INFO(%_ASM_DX)
-       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user
        ASM_STAC
 3:     movl -3(%_ASM_AX),%edx
@@ -75,8 +75,8 @@ ENTRY(__get_user_8)
 #ifdef CONFIG_X86_64
        add $7,%_ASM_AX
        jc bad_get_user
-       GET_THREAD_INFO(%_ASM_DX)
-       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user
        ASM_STAC
 4:     movq -7(%_ASM_AX),%rdx
@@ -86,8 +86,8 @@ ENTRY(__get_user_8)
 #else
        add $7,%_ASM_AX
        jc bad_get_user_8
-       GET_THREAD_INFO(%_ASM_DX)
-       cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+       mov PER_CPU_VAR(current_task), %_ASM_DX
+       cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user_8
        ASM_STAC
 4:     movl -7(%_ASM_AX),%edx
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
new file mode 100644 (file)
index 0000000..02de3d7
--- /dev/null
@@ -0,0 +1,77 @@
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+
+/*
+ * unsigned int __sw_hweight32(unsigned int w)
+ * %rdi: w
+ */
+ENTRY(__sw_hweight32)
+
+#ifdef CONFIG_X86_64
+       movl %edi, %eax                         # w
+#endif
+       __ASM_SIZE(push,) %__ASM_REG(dx)
+       movl %eax, %edx                         # w -> t
+       shrl %edx                               # t >>= 1
+       andl $0x55555555, %edx                  # t &= 0x55555555
+       subl %edx, %eax                         # w -= t
+
+       movl %eax, %edx                         # w -> t
+       shrl $2, %eax                           # w_tmp >>= 2
+       andl $0x33333333, %edx                  # t     &= 0x33333333
+       andl $0x33333333, %eax                  # w_tmp &= 0x33333333
+       addl %edx, %eax                         # w = w_tmp + t
+
+       movl %eax, %edx                         # w -> t
+       shrl $4, %edx                           # t >>= 4
+       addl %edx, %eax                         # w_tmp += t
+       andl  $0x0f0f0f0f, %eax                 # w_tmp &= 0x0f0f0f0f
+       imull $0x01010101, %eax, %eax           # w_tmp *= 0x01010101
+       shrl $24, %eax                          # w = w_tmp >> 24
+       __ASM_SIZE(pop,) %__ASM_REG(dx)
+       ret
+ENDPROC(__sw_hweight32)
+
+ENTRY(__sw_hweight64)
+#ifdef CONFIG_X86_64
+       pushq   %rdx
+
+       movq    %rdi, %rdx                      # w -> t
+       movabsq $0x5555555555555555, %rax
+       shrq    %rdx                            # t >>= 1
+       andq    %rdx, %rax                      # t &= 0x5555555555555555
+       movabsq $0x3333333333333333, %rdx
+       subq    %rax, %rdi                      # w -= t
+
+       movq    %rdi, %rax                      # w -> t
+       shrq    $2, %rdi                        # w_tmp >>= 2
+       andq    %rdx, %rax                      # t     &= 0x3333333333333333
+       andq    %rdi, %rdx                      # w_tmp &= 0x3333333333333333
+       addq    %rdx, %rax                      # w = w_tmp + t
+
+       movq    %rax, %rdx                      # w -> t
+       shrq    $4, %rdx                        # t >>= 4
+       addq    %rdx, %rax                      # w_tmp += t
+       movabsq $0x0f0f0f0f0f0f0f0f, %rdx
+       andq    %rdx, %rax                      # w_tmp &= 0x0f0f0f0f0f0f0f0f
+       movabsq $0x0101010101010101, %rdx
+       imulq   %rdx, %rax                      # w_tmp *= 0x0101010101010101
+       shrq    $56, %rax                       # w = w_tmp >> 56
+
+       popq    %rdx
+       ret
+#else /* CONFIG_X86_32 */
+       /* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
+       pushl   %ecx
+
+       call    __sw_hweight32
+       movl    %eax, %ecx                      # stash away result
+       movl    %edx, %eax                      # second part of input
+       call    __sw_hweight32
+       addl    %ecx, %eax                      # result
+
+       popl    %ecx
+       ret
+#endif
+ENDPROC(__sw_hweight64)
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
new file mode 100644 (file)
index 0000000..f7dfeda
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Entropy functions used on early boot for KASLR base and memory
+ * randomization. The base randomization is done in the compressed
+ * kernel and memory randomization is done early when the regular
+ * kernel starts. This file is included in the compressed kernel and
+ * normally linked in the regular.
+ */
+#include <asm/kaslr.h>
+#include <asm/msr.h>
+#include <asm/archrandom.h>
+#include <asm/e820.h>
+#include <asm/io.h>
+
+/*
+ * When built for the regular kernel, several functions need to be stubbed out
+ * or changed to their regular kernel equivalent.
+ */
+#ifndef KASLR_COMPRESSED_BOOT
+#include <asm/cpufeature.h>
+#include <asm/setup.h>
+
+#define debug_putstr(v) early_printk(v)
+#define has_cpuflag(f) boot_cpu_has(f)
+#define get_boot_seed() kaslr_offset()
+#endif
+
+#define I8254_PORT_CONTROL     0x43
+#define I8254_PORT_COUNTER0    0x40
+#define I8254_CMD_READBACK     0xC0
+#define I8254_SELECT_COUNTER0  0x02
+#define I8254_STATUS_NOTREADY  0x40
+static inline u16 i8254(void)
+{
+       u16 status, timer;
+
+       do {
+               outb(I8254_PORT_CONTROL,
+                    I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+               status = inb(I8254_PORT_COUNTER0);
+               timer  = inb(I8254_PORT_COUNTER0);
+               timer |= inb(I8254_PORT_COUNTER0) << 8;
+       } while (status & I8254_STATUS_NOTREADY);
+
+       return timer;
+}
+
+unsigned long kaslr_get_random_long(const char *purpose)
+{
+#ifdef CONFIG_X86_64
+       const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
+#else
+       const unsigned long mix_const = 0x3f39e593UL;
+#endif
+       unsigned long raw, random = get_boot_seed();
+       bool use_i8254 = true;
+
+       debug_putstr(purpose);
+       debug_putstr(" KASLR using");
+
+       if (has_cpuflag(X86_FEATURE_RDRAND)) {
+               debug_putstr(" RDRAND");
+               if (rdrand_long(&raw)) {
+                       random ^= raw;
+                       use_i8254 = false;
+               }
+       }
+
+       if (has_cpuflag(X86_FEATURE_TSC)) {
+               debug_putstr(" RDTSC");
+               raw = rdtsc();
+
+               random ^= raw;
+               use_i8254 = false;
+       }
+
+       if (use_i8254) {
+               debug_putstr(" i8254");
+               random ^= i8254();
+       }
+
+       /* Circular multiply for better bit diffusion */
+       asm("mul %3"
+           : "=a" (random), "=d" (raw)
+           : "a" (random), "rm" (mix_const));
+       random += raw;
+
+       debug_putstr("...\n");
+
+       return random;
+}
index e0817a12d32362b4e69687c7b09e424580616e5d..c891ece81e5b11a9b2337eac142c65c95286ad72 100644 (file)
  * as they get called from within inline assembly.
  */
 
-#define ENTER  GET_THREAD_INFO(%_ASM_BX)
+#define ENTER  mov PER_CPU_VAR(current_task), %_ASM_BX
 #define EXIT   ASM_CLAC ;      \
                ret
 
 .text
 ENTRY(__put_user_1)
        ENTER
-       cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
+       cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
        jae bad_put_user
        ASM_STAC
 1:     movb %al,(%_ASM_CX)
@@ -46,7 +46,7 @@ ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
        ENTER
-       mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+       mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
        sub $1,%_ASM_BX
        cmp %_ASM_BX,%_ASM_CX
        jae bad_put_user
@@ -58,7 +58,7 @@ ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
        ENTER
-       mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+       mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
        sub $3,%_ASM_BX
        cmp %_ASM_BX,%_ASM_CX
        jae bad_put_user
@@ -70,7 +70,7 @@ ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
        ENTER
-       mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+       mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
        sub $7,%_ASM_BX
        cmp %_ASM_BX,%_ASM_CX
        jae bad_put_user
index 0a42327a59d71e5e2d607623ba209a69d37c6f8d..9f760cdcaf40b837d4ed9a40d6e6b55f9ebbe17f 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright 2002 Andi Kleen <ak@suse.de>
  */
 #include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 /*
  * Zero Userspace
index 62c0043a5fd545f09a584e2c1f991923c9b16afb..96d2b847e09ea504fc3ac824d347651a3bc880b9 100644 (file)
@@ -37,4 +37,5 @@ obj-$(CONFIG_NUMA_EMU)                += numa_emulation.o
 
 obj-$(CONFIG_X86_INTEL_MPX)    += mpx.o
 obj-$(CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS) += pkeys.o
+obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o
 
index 99bfb192803ffcbc22299503a48b6ae84f962f91..9a17250bcbe0efd02d0344d43a95730b0b1b752e 100644 (file)
@@ -72,9 +72,9 @@ static struct addr_marker address_markers[] = {
        { 0, "User Space" },
 #ifdef CONFIG_X86_64
        { 0x8000000000000000UL, "Kernel Space" },
-       { PAGE_OFFSET,          "Low Kernel Mapping" },
-       { VMALLOC_START,        "vmalloc() Area" },
-       { VMEMMAP_START,        "Vmemmap" },
+       { 0/* PAGE_OFFSET */,   "Low Kernel Mapping" },
+       { 0/* VMALLOC_START */, "vmalloc() Area" },
+       { 0/* VMEMMAP_START */, "Vmemmap" },
 # ifdef CONFIG_X86_ESPFIX64
        { ESPFIX_BASE_ADDR,     "ESPfix Area", 16 },
 # endif
@@ -434,8 +434,16 @@ void ptdump_walk_pgd_level_checkwx(void)
 
 static int __init pt_dump_init(void)
 {
+       /*
+        * Various markers are not compile-time constants, so assign them
+        * here.
+        */
+#ifdef CONFIG_X86_64
+       address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
+       address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
+       address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
+#endif
 #ifdef CONFIG_X86_32
-       /* Not a compile-time constant on x86-32 */
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
 # ifdef CONFIG_HIGHMEM
index 4bb53b89f3c55defd9098d81002eed1877b0fa07..832b98f822be72b07fe7f5ec7750cad63d0249bb 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/module.h>
 #include <asm/uaccess.h>
 #include <asm/traps.h>
+#include <asm/kdebug.h>
 
 typedef bool (*ex_handler_t)(const struct exception_table_entry *,
                            struct pt_regs *, int);
@@ -37,7 +38,7 @@ bool ex_handler_ext(const struct exception_table_entry *fixup,
                   struct pt_regs *regs, int trapnr)
 {
        /* Special hack for uaccess_err */
-       current_thread_info()->uaccess_err = 1;
+       current->thread.uaccess_err = 1;
        regs->ip = ex_fixup_addr(fixup);
        return true;
 }
@@ -46,8 +47,9 @@ EXPORT_SYMBOL(ex_handler_ext);
 bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
                             struct pt_regs *regs, int trapnr)
 {
-       WARN_ONCE(1, "unchecked MSR access error: RDMSR from 0x%x\n",
-                 (unsigned int)regs->cx);
+       if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pF)\n",
+                        (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
+               show_stack_regs(regs);
 
        /* Pretend that the read succeeded and returned 0. */
        regs->ip = ex_fixup_addr(fixup);
@@ -60,9 +62,10 @@ EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
 bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
                             struct pt_regs *regs, int trapnr)
 {
-       WARN_ONCE(1, "unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x)\n",
-                 (unsigned int)regs->cx,
-                 (unsigned int)regs->dx, (unsigned int)regs->ax);
+       if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pF)\n",
+                        (unsigned int)regs->cx, (unsigned int)regs->dx,
+                        (unsigned int)regs->ax,  regs->ip, (void *)regs->ip))
+               show_stack_regs(regs);
 
        /* Pretend that the write succeeded. */
        regs->ip = ex_fixup_addr(fixup);
index 7d1fa7cd237443e054da06255697412d5e9354f6..d22161ab941d51aee4ba4ec74d8858bc8b164c10 100644 (file)
@@ -439,7 +439,7 @@ static noinline int vmalloc_fault(unsigned long address)
         * happen within a race in page table update. In the later
         * case just flush:
         */
-       pgd = pgd_offset(current->active_mm, address);
+       pgd = (pgd_t *)__va(read_cr3()) + pgd_index(address);
        pgd_ref = pgd_offset_k(address);
        if (pgd_none(*pgd_ref))
                return -1;
@@ -737,7 +737,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
                 * In this case we need to make sure we're not recursively
                 * faulting through the emulate_vsyscall() logic.
                 */
-               if (current_thread_info()->sig_on_uaccess_error && signal) {
+               if (current->thread.sig_on_uaccess_err && signal) {
                        tsk->thread.trap_nr = X86_TRAP_PF;
                        tsk->thread.error_code = error_code | PF_USER;
                        tsk->thread.cr2 = address;
index 372aad2b32910d30eb3f062e67a7589340847d32..cc82830bc8c439745f69a4ddddc82ee736266d80 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/proto.h>
 #include <asm/dma.h>           /* for MAX_DMA_PFN */
 #include <asm/microcode.h>
+#include <asm/kaslr.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -590,6 +591,9 @@ void __init init_mem_mapping(void)
        /* the ISA range is always mapped regardless of memory holes */
        init_memory_mapping(0, ISA_END_ADDRESS);
 
+       /* Init the trampoline, possibly with KASLR memory offset */
+       init_trampoline();
+
        /*
         * If the allocation is in bottom-up direction, we setup direct mapping
         * in bottom-up, otherwise we setup direct mapping in top-down.
index bce2e5d9edd458cf28f44ac921f91cc790d11a52..53cc2256cf239a54be55f3b552b788350ddf3a25 100644 (file)
@@ -328,22 +328,30 @@ void __init cleanup_highmap(void)
        }
 }
 
+/*
+ * Create PTE level page table mapping for physical addresses.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
              pgprot_t prot)
 {
-       unsigned long pages = 0, next;
-       unsigned long last_map_addr = end;
+       unsigned long pages = 0, paddr_next;
+       unsigned long paddr_last = paddr_end;
+       pte_t *pte;
        int i;
 
-       pte_t *pte = pte_page + pte_index(addr);
+       pte = pte_page + pte_index(paddr);
+       i = pte_index(paddr);
 
-       for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
-               next = (addr & PAGE_MASK) + PAGE_SIZE;
-               if (addr >= end) {
+       for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
+               paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
+               if (paddr >= paddr_end) {
                        if (!after_bootmem &&
-                           !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
-                           !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
+                           !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+                                            E820_RAM) &&
+                           !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+                                            E820_RESERVED_KERN))
                                set_pte(pte, __pte(0));
                        continue;
                }
@@ -354,54 +362,61 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
                 * pagetable pages as RO. So assume someone who pre-setup
                 * these mappings are more intelligent.
                 */
-               if (pte_val(*pte)) {
+               if (!pte_none(*pte)) {
                        if (!after_bootmem)
                                pages++;
                        continue;
                }
 
                if (0)
-                       printk("   pte=%p addr=%lx pte=%016lx\n",
-                              pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
+                       pr_info("   pte=%p addr=%lx pte=%016lx\n", pte, paddr,
+                               pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
-               set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
-               last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
+               set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+               paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
        }
 
        update_page_count(PG_LEVEL_4K, pages);
 
-       return last_map_addr;
+       return paddr_last;
 }
 
+/*
+ * Create PMD level page table mapping for physical addresses. The virtual
+ * and physical address have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
              unsigned long page_size_mask, pgprot_t prot)
 {
-       unsigned long pages = 0, next;
-       unsigned long last_map_addr = end;
+       unsigned long pages = 0, paddr_next;
+       unsigned long paddr_last = paddr_end;
 
-       int i = pmd_index(address);
+       int i = pmd_index(paddr);
 
-       for (; i < PTRS_PER_PMD; i++, address = next) {
-               pmd_t *pmd = pmd_page + pmd_index(address);
+       for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
+               pmd_t *pmd = pmd_page + pmd_index(paddr);
                pte_t *pte;
                pgprot_t new_prot = prot;
 
-               next = (address & PMD_MASK) + PMD_SIZE;
-               if (address >= end) {
+               paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
+               if (paddr >= paddr_end) {
                        if (!after_bootmem &&
-                           !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
-                           !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
+                           !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+                                            E820_RAM) &&
+                           !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+                                            E820_RESERVED_KERN))
                                set_pmd(pmd, __pmd(0));
                        continue;
                }
 
-               if (pmd_val(*pmd)) {
+               if (!pmd_none(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = (pte_t *)pmd_page_vaddr(*pmd);
-                               last_map_addr = phys_pte_init(pte, address,
-                                                               end, prot);
+                               paddr_last = phys_pte_init(pte, paddr,
+                                                          paddr_end, prot);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
@@ -420,7 +435,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                if (!after_bootmem)
                                        pages++;
-                               last_map_addr = next;
+                               paddr_last = paddr_next;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
@@ -430,51 +445,65 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
-                               pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
+                               pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
-                       last_map_addr = next;
+                       paddr_last = paddr_next;
                        continue;
                }
 
                pte = alloc_low_page();
-               last_map_addr = phys_pte_init(pte, address, end, new_prot);
+               paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
 
                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, pte);
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
-       return last_map_addr;
+       return paddr_last;
 }
 
+/*
+ * Create PUD level page table mapping for physical addresses. The virtual
+ * and physical address do not have to be aligned at this level. KASLR can
+ * randomize virtual addresses up to this level.
+ * It returns the last physical address mapped.
+ */
 static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
-                        unsigned long page_size_mask)
+phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
+             unsigned long page_size_mask)
 {
-       unsigned long pages = 0, next;
-       unsigned long last_map_addr = end;
-       int i = pud_index(addr);
+       unsigned long pages = 0, paddr_next;
+       unsigned long paddr_last = paddr_end;
+       unsigned long vaddr = (unsigned long)__va(paddr);
+       int i = pud_index(vaddr);
 
-       for (; i < PTRS_PER_PUD; i++, addr = next) {
-               pud_t *pud = pud_page + pud_index(addr);
+       for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+               pud_t *pud;
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;
 
-               next = (addr & PUD_MASK) + PUD_SIZE;
-               if (addr >= end) {
+               vaddr = (unsigned long)__va(paddr);
+               pud = pud_page + pud_index(vaddr);
+               paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
+               if (paddr >= paddr_end) {
                        if (!after_bootmem &&
-                           !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
-                           !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
+                           !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+                                            E820_RAM) &&
+                           !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+                                            E820_RESERVED_KERN))
                                set_pud(pud, __pud(0));
                        continue;
                }
 
-               if (pud_val(*pud)) {
+               if (!pud_none(*pud)) {
                        if (!pud_large(*pud)) {
                                pmd = pmd_offset(pud, 0);
-                               last_map_addr = phys_pmd_init(pmd, addr, end,
-                                                        page_size_mask, prot);
+                               paddr_last = phys_pmd_init(pmd, paddr,
+                                                          paddr_end,
+                                                          page_size_mask,
+                                                          prot);
                                __flush_tlb_all();
                                continue;
                        }
@@ -493,7 +522,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                if (!after_bootmem)
                                        pages++;
-                               last_map_addr = next;
+                               paddr_last = paddr_next;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -503,16 +532,16 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
-                               pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
+                               pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
                                        PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
-                       last_map_addr = next;
+                       paddr_last = paddr_next;
                        continue;
                }
 
                pmd = alloc_low_page();
-               last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
-                                             prot);
+               paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
+                                          page_size_mask, prot);
 
                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, pmd);
@@ -522,38 +551,44 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
        update_page_count(PG_LEVEL_1G, pages);
 
-       return last_map_addr;
+       return paddr_last;
 }
 
+/*
+ * Create page table mapping for the physical memory for specific physical
+ * addresses. The virtual and physical addresses have to be aligned on PMD level
+ * down. It returns the last physical address mapped.
+ */
 unsigned long __meminit
-kernel_physical_mapping_init(unsigned long start,
-                            unsigned long end,
+kernel_physical_mapping_init(unsigned long paddr_start,
+                            unsigned long paddr_end,
                             unsigned long page_size_mask)
 {
        bool pgd_changed = false;
-       unsigned long next, last_map_addr = end;
-       unsigned long addr;
+       unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
 
-       start = (unsigned long)__va(start);
-       end = (unsigned long)__va(end);
-       addr = start;
+       paddr_last = paddr_end;
+       vaddr = (unsigned long)__va(paddr_start);
+       vaddr_end = (unsigned long)__va(paddr_end);
+       vaddr_start = vaddr;
 
-       for (; start < end; start = next) {
-               pgd_t *pgd = pgd_offset_k(start);
+       for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+               pgd_t *pgd = pgd_offset_k(vaddr);
                pud_t *pud;
 
-               next = (start & PGDIR_MASK) + PGDIR_SIZE;
+               vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
 
                if (pgd_val(*pgd)) {
                        pud = (pud_t *)pgd_page_vaddr(*pgd);
-                       last_map_addr = phys_pud_init(pud, __pa(start),
-                                                __pa(end), page_size_mask);
+                       paddr_last = phys_pud_init(pud, __pa(vaddr),
+                                                  __pa(vaddr_end),
+                                                  page_size_mask);
                        continue;
                }
 
                pud = alloc_low_page();
-               last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
-                                                page_size_mask);
+               paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+                                          page_size_mask);
 
                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, pud);
@@ -562,11 +597,11 @@ kernel_physical_mapping_init(unsigned long start,
        }
 
        if (pgd_changed)
-               sync_global_pgds(addr, end - 1, 0);
+               sync_global_pgds(vaddr_start, vaddr_end - 1, 0);
 
        __flush_tlb_all();
 
-       return last_map_addr;
+       return paddr_last;
 }
 
 #ifndef CONFIG_NUMA
@@ -673,7 +708,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
 
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
-               if (pte_val(*pte))
+               if (!pte_none(*pte))
                        return;
        }
 
@@ -691,7 +726,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 
        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
-               if (pmd_val(*pmd))
+               if (!pmd_none(*pmd))
                        return;
        }
 
@@ -702,27 +737,6 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
        spin_unlock(&init_mm.page_table_lock);
 }
 
-/* Return true if pgd is changed, otherwise return false. */
-static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
-{
-       pud_t *pud;
-       int i;
-
-       for (i = 0; i < PTRS_PER_PUD; i++) {
-               pud = pud_start + i;
-               if (pud_val(*pud))
-                       return false;
-       }
-
-       /* free a pud table */
-       free_pagetable(pgd_page(*pgd), 0);
-       spin_lock(&init_mm.page_table_lock);
-       pgd_clear(pgd);
-       spin_unlock(&init_mm.page_table_lock);
-
-       return true;
-}
-
 static void __meminit
 remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
                 bool direct)
@@ -913,7 +927,6 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
        unsigned long addr;
        pgd_t *pgd;
        pud_t *pud;
-       bool pgd_changed = false;
 
        for (addr = start; addr < end; addr = next) {
                next = pgd_addr_end(addr, end);
@@ -924,13 +937,8 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
 
                pud = (pud_t *)pgd_page_vaddr(*pgd);
                remove_pud_table(pud, addr, next, direct);
-               if (free_pud_table(pud, pgd))
-                       pgd_changed = true;
        }
 
-       if (pgd_changed)
-               sync_global_pgds(start, end - 1, 1);
-
        flush_tlb_all();
 }
 
index 1b1110fa00570e0d242926ca8f06adc37db518a5..0493c17b8a516f4212bc21fe5e32d0bce466c24f 100644 (file)
@@ -54,8 +54,8 @@ static int kasan_die_handler(struct notifier_block *self,
                             void *data)
 {
        if (val == DIE_GPF) {
-               pr_emerg("CONFIG_KASAN_INLINE enabled");
-               pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
+               pr_emerg("CONFIG_KASAN_INLINE enabled\n");
+               pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
 }
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
new file mode 100644 (file)
index 0000000..26dccd6
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * This file implements KASLR memory randomization for x86_64. It randomizes
+ * the virtual address space of kernel memory regions (physical memory
+ * mapping, vmalloc & vmemmap) for x86_64. This security feature mitigates
+ * exploits relying on predictable kernel addresses.
+ *
+ * Entropy is generated using the KASLR early boot functions now shared in
+ * the lib directory (originally written by Kees Cook). Randomization is
+ * done on PGD & PUD page table levels to increase possible addresses. The
+ * physical memory mapping code was adapted to support PUD level virtual
+ * addresses. This implementation on the best configuration provides 30,000
+ * possible virtual addresses on average for each memory region. An additional
+ * low memory page is used to ensure each CPU can start with a PGD aligned
+ * virtual address (for realmode).
+ *
+ * The order of each memory region is not changed. The feature looks at
+ * the available space for the regions based on different configuration
+ * options and randomizes the base and space between each. The size of the
+ * physical memory mapping is the available physical memory.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/random.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/kaslr.h>
+
+#include "mm_internal.h"
+
+#define TB_SHIFT 40
+
+/*
+ * Virtual address start and end range for randomization. The end changes
+ * based on configuration to have the highest amount of space for randomization.
+ * It increases the possible random position for each randomized region.
+ *
+ * You need to add an if/def entry if you introduce a new memory region
+ * compatible with KASLR. Your entry must be in logical order with memory
+ * layout. For example, ESPFIX is before EFI because its virtual address is
+ * before. You also need to add a BUILD_BUG_ON in kernel_randomize_memory to
+ * ensure that this order is correct and won't be changed.
+ */
+static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;
+static const unsigned long vaddr_end = VMEMMAP_START;
+
+/* Default values */
+unsigned long page_offset_base = __PAGE_OFFSET_BASE;
+EXPORT_SYMBOL(page_offset_base);
+unsigned long vmalloc_base = __VMALLOC_BASE;
+EXPORT_SYMBOL(vmalloc_base);
+
+/*
+ * Memory regions randomized by KASLR (except modules that use a separate logic
+ * earlier during boot). The list is ordered based on virtual addresses. This
+ * order is kept after randomization.
+ */
+static __initdata struct kaslr_memory_region {
+       unsigned long *base;
+       unsigned long size_tb;
+} kaslr_regions[] = {
+       { &page_offset_base, 64/* Maximum */ },
+       { &vmalloc_base, VMALLOC_SIZE_TB },
+};
+
+/* Get size in bytes used by the memory region */
+static inline unsigned long get_padding(struct kaslr_memory_region *region)
+{
+       return (region->size_tb << TB_SHIFT);
+}
+
+/*
+ * Apply no randomization if KASLR was disabled at boot or if KASAN
+ * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
+ */
+static inline bool kaslr_memory_enabled(void)
+{
+       return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+}
+
+/* Initialize base and padding for each memory region randomized with KASLR */
+void __init kernel_randomize_memory(void)
+{
+       size_t i;
+       unsigned long vaddr = vaddr_start;
+       unsigned long rand, memory_tb;
+       struct rnd_state rand_state;
+       unsigned long remain_entropy;
+
+       if (!kaslr_memory_enabled())
+               return;
+
+       /*
+        * Update Physical memory mapping to available and
+        * add padding if needed (especially for memory hotplug support).
+        */
+       BUG_ON(kaslr_regions[0].base != &page_offset_base);
+       memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) +
+               CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
+
+       /* Adapt physical memory region size based on available memory */
+       if (memory_tb < kaslr_regions[0].size_tb)
+               kaslr_regions[0].size_tb = memory_tb;
+
+       /* Calculate entropy available between regions */
+       remain_entropy = vaddr_end - vaddr_start;
+       for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
+               remain_entropy -= get_padding(&kaslr_regions[i]);
+
+       prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));
+
+       for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
+               unsigned long entropy;
+
+               /*
+                * Select a random virtual address using the extra entropy
+                * available.
+                */
+               entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
+               prandom_bytes_state(&rand_state, &rand, sizeof(rand));
+               entropy = (rand % (entropy + 1)) & PUD_MASK;
+               vaddr += entropy;
+               *kaslr_regions[i].base = vaddr;
+
+               /*
+                * Jump the region and add a minimum padding based on
+                * randomization alignment.
+                */
+               vaddr += get_padding(&kaslr_regions[i]);
+               vaddr = round_up(vaddr + 1, PUD_SIZE);
+               remain_entropy -= entropy;
+       }
+}
+
+/*
+ * Create PGD aligned trampoline table to allow real mode initialization
+ * of additional CPUs. Consume only 1 low memory page.
+ */
+void __meminit init_trampoline(void)
+{
+       unsigned long paddr, paddr_next;
+       pgd_t *pgd;
+       pud_t *pud_page, *pud_page_tramp;
+       int i;
+
+       if (!kaslr_memory_enabled()) {
+               init_trampoline_default();
+               return;
+       }
+
+       pud_page_tramp = alloc_low_page();
+
+       paddr = 0;
+       pgd = pgd_offset_k((unsigned long)__va(paddr));
+       pud_page = (pud_t *) pgd_page_vaddr(*pgd);
+
+       for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+               pud_t *pud, *pud_tramp;
+               unsigned long vaddr = (unsigned long)__va(paddr);
+
+               pud_tramp = pud_page_tramp + pud_index(paddr);
+               pud = pud_page + pud_index(vaddr);
+               paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
+               *pud_tramp = *pud;
+       }
+
+       set_pgd(&trampoline_pgd_entry,
+               __pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
+}
index 7a1f7bbf4105b6ec570c9c15497a00237c5185e9..849dc09fa4f0b803b7139015de6b98f7778165bd 100644 (file)
@@ -101,7 +101,8 @@ static inline unsigned long highmap_start_pfn(void)
 
 static inline unsigned long highmap_end_pfn(void)
 {
-       return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
+       /* Do not reference physical address outside the kernel. */
+       return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT;
 }
 
 #endif
@@ -112,6 +113,12 @@ within(unsigned long addr, unsigned long start, unsigned long end)
        return addr >= start && addr < end;
 }
 
+static inline int
+within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
+{
+       return addr >= start && addr <= end;
+}
+
 /*
  * Flushing functions
  */
@@ -746,18 +753,6 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
        return true;
 }
 
-static bool try_to_free_pud_page(pud_t *pud)
-{
-       int i;
-
-       for (i = 0; i < PTRS_PER_PUD; i++)
-               if (!pud_none(pud[i]))
-                       return false;
-
-       free_page((unsigned long)pud);
-       return true;
-}
-
 static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
 {
        pte_t *pte = pte_offset_kernel(pmd, start);
@@ -871,16 +866,6 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
         */
 }
 
-static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
-{
-       pgd_t *pgd_entry = root + pgd_index(addr);
-
-       unmap_pud_range(pgd_entry, addr, end);
-
-       if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
-               pgd_clear(pgd_entry);
-}
-
 static int alloc_pte_page(pmd_t *pmd)
 {
        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -1113,7 +1098,12 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 
        ret = populate_pud(cpa, addr, pgd_entry, pgprot);
        if (ret < 0) {
-               unmap_pgd_range(cpa->pgd, addr,
+               /*
+                * Leave the PUD page in place in case some other CPU or thread
+                * already found it, but remove any useless entries we just
+                * added to it.
+                */
+               unmap_pud_range(pgd_entry, addr,
                                addr + (cpa->numpages << PAGE_SHIFT));
                return ret;
        }
@@ -1185,7 +1175,7 @@ repeat:
                return __cpa_process_fault(cpa, address, primary);
 
        old_pte = *kpte;
-       if (!pte_val(old_pte))
+       if (pte_none(old_pte))
                return __cpa_process_fault(cpa, address, primary);
 
        if (level == PG_LEVEL_4K) {
@@ -1316,7 +1306,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
         * to touch the high mapped kernel as well:
         */
        if (!within(vaddr, (unsigned long)_text, _brk_end) &&
-           within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
+           within_inclusive(cpa->pfn, highmap_start_pfn(),
+                            highmap_end_pfn())) {
                unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
                                               __START_KERNEL_map - phys_base;
                alias_cpa = *cpa;
@@ -1991,12 +1982,6 @@ out:
        return retval;
 }
 
-void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
-                              unsigned numpages)
-{
-       unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
-}
-
 /*
  * The testcases use internal knowledge of the implementation that shouldn't
  * be exposed to the rest of the kernel. Include these directly here.
index fb0604f11eec268a2cc69b1d47e3885b073c6cf1..db00e3e2f3dcf5f2feb0111cb686c645a16ec71f 100644 (file)
@@ -755,11 +755,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
                return 1;
 
        while (cursor < to) {
-               if (!devmem_is_allowed(pfn)) {
-                       pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
-                               current->comm, from, to - 1);
+               if (!devmem_is_allowed(pfn))
                        return 0;
-               }
                cursor += PAGE_SIZE;
                pfn++;
        }
index 75cc0978d45d7d7acc43d65615b766c78df9ab1a..e67ae0e6c59ddb95a6368ced774840827677ccd9 100644 (file)
@@ -47,7 +47,7 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
-       if (pte_val(pteval))
+       if (!pte_none(pteval))
                set_pte_at(&init_mm, vaddr, pte, pteval);
        else
                pte_clear(&init_mm, vaddr, pte);
index b2a4e2a61f6b8f41775c47b0aed020a6441e5d66..3cd69832d7f4c6f3743bbb3a190c39672c89461d 100644 (file)
@@ -396,6 +396,7 @@ int __init pci_acpi_init(void)
                return -ENODEV;
 
        printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
+       acpi_irq_penalty_init();
        pcibios_enable_irq = acpi_pci_irq_enable;
        pcibios_disable_irq = acpi_pci_irq_disable;
        x86_init.pci.init_irq = x86_init_noop;
index 8b93e634af84c4698e6f7c6ab718a018e2cdc9db..5a18aedcb3410bb461aca29998b324ef57970d3c 100644 (file)
@@ -36,7 +36,8 @@
 #define PCIE_CAP_OFFSET        0x100
 
 /* Quirks for the listed devices */
-#define PCI_DEVICE_ID_INTEL_MRFL_MMC   0x1190
+#define PCI_DEVICE_ID_INTEL_MRFLD_MMC  0x1190
+#define PCI_DEVICE_ID_INTEL_MRFLD_HSU  0x1191
 
 /* Fixed BAR fields */
 #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00        /* Fixed BAR (TBD) */
@@ -224,14 +225,21 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 
                /* Special treatment for IRQ0 */
                if (dev->irq == 0) {
+                       /*
+                        * Skip HS UART common registers device since it has
+                        * IRQ0 assigned and not used by the kernel.
+                        */
+                       if (dev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU)
+                               return -EBUSY;
                        /*
                         * TNG has IRQ0 assigned to eMMC controller. But there
                         * are also other devices with bogus PCI configuration
                         * that have IRQ0 assigned. This check ensures that
-                        * eMMC gets it.
+                        * eMMC gets it. The rest of devices still could be
+                        * enabled without interrupt line being allocated.
                         */
-                       if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC)
-                               return -EBUSY;
+                       if (dev->device != PCI_DEVICE_ID_INTEL_MRFLD_MMC)
+                               return 0;
                }
                break;
        default:
@@ -308,14 +316,39 @@ static void pci_d3delay_fixup(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);
 
-static void mrst_power_off_unused_dev(struct pci_dev *dev)
+static void mid_power_off_one_device(struct pci_dev *dev)
 {
+       u16 pmcsr;
+
+       /*
+        * Update current state first, otherwise PCI core enforces PCI_D0 in
+        * pci_set_power_state() for devices which status was PCI_UNKNOWN.
+        */
+       pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+       dev->current_state = (pci_power_t __force)(pmcsr & PCI_PM_CTRL_STATE_MASK);
+
        pci_set_power_state(dev, PCI_D3hot);
 }
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0801, mrst_power_off_unused_dev);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0809, mrst_power_off_unused_dev);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x080C, mrst_power_off_unused_dev);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0815, mrst_power_off_unused_dev);
+
+static void mid_power_off_devices(struct pci_dev *dev)
+{
+       int id;
+
+       if (!pci_soc_mode)
+               return;
+
+       id = intel_mid_pwr_get_lss_id(dev);
+       if (id < 0)
+               return;
+
+       /*
+        * This sets only PMCSR bits. The actual power off will happen in
+        * arch/x86/platform/intel-mid/pwr.c.
+        */
+       mid_power_off_one_device(dev);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, mid_power_off_devices);
 
 /*
  * Langwell devices reside at fixed offsets, don't try to move them.
index 81c769e806140608db9198ed501336cbcfe5605d..8ff7b9355416bdae9e8f587d989807a2582d97fc 100644 (file)
 #include <linux/seq_file.h>
 #include <linux/io.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/iosf_mbi.h>
 
-/* Power gate status reg */
-#define PWRGT_STATUS           0x61
 /* Subsystem config/status Video processor */
 #define VED_SS_PM0             0x32
 /* Subsystem config/status ISP (Image Signal Processor) */
 #define MIO_SS_PM              0x3B
 /* Shift bits for getting status for video, isp and i/o */
 #define SSS_SHIFT              24
+
+/* Power gate status reg */
+#define PWRGT_STATUS           0x61
 /* Shift bits for getting status for graphics rendering */
 #define RENDER_POS             0
 /* Shift bits for getting status for media control */
 #define MEDIA_POS              2
 /* Shift bits for getting status for Valley View/Baytrail display */
 #define VLV_DISPLAY_POS                6
+
 /* Subsystem config/status display for Cherry Trail SOC */
 #define CHT_DSP_SSS            0x36
 /* Shift bits for getting status for display */
@@ -52,6 +55,14 @@ struct punit_device {
        int sss_pos;
 };
 
+static const struct punit_device punit_device_tng[] = {
+       { "DISPLAY",    CHT_DSP_SSS,    SSS_SHIFT },
+       { "VED",        VED_SS_PM0,     SSS_SHIFT },
+       { "ISP",        ISP_SS_PM0,     SSS_SHIFT },
+       { "MIO",        MIO_SS_PM,      SSS_SHIFT },
+       { NULL }
+};
+
 static const struct punit_device punit_device_byt[] = {
        { "GFX RENDER", PWRGT_STATUS,   RENDER_POS },
        { "GFX MEDIA",  PWRGT_STATUS,   MEDIA_POS },
@@ -143,8 +154,9 @@ static void punit_dbgfs_unregister(void)
          (kernel_ulong_t)&drv_data }
 
 static const struct x86_cpu_id intel_punit_cpu_ids[] = {
-       ICPU(55, punit_device_byt), /* Valleyview, Bay Trail */
-       ICPU(76, punit_device_cht), /* Braswell, Cherry Trail */
+       ICPU(INTEL_FAM6_ATOM_SILVERMONT1, punit_device_byt),
+       ICPU(INTEL_FAM6_ATOM_MERRIFIELD1, punit_device_tng),
+       ICPU(INTEL_FAM6_ATOM_AIRMONT,     punit_device_cht),
        {}
 };
 
index f93545e7dc54e7e2aa19bf494db55eb74080b739..17c8bbd4e2f0928634e67b00e86b63c24f381494 100644 (file)
@@ -98,21 +98,6 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
        return status;
 }
 
-void efi_get_time(struct timespec *now)
-{
-       efi_status_t status;
-       efi_time_t eft;
-       efi_time_cap_t cap;
-
-       status = efi.get_time(&eft, &cap);
-       if (status != EFI_SUCCESS)
-               pr_err("Oops: efitime: can't read time!\n");
-
-       now->tv_sec = mktime(eft.year, eft.month, eft.day, eft.hour,
-                            eft.minute, eft.second);
-       now->tv_nsec = 0;
-}
-
 void __init efi_find_mirror(void)
 {
        efi_memory_desc_t *md;
@@ -978,8 +963,6 @@ static void __init __efi_enter_virtual_mode(void)
         * EFI mixed mode we need all of memory to be accessible when
         * we pass parameters to the EFI runtime services in the
         * thunking code.
-        *
-        * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
         */
        free_pages((unsigned long)new_memmap, pg_shift);
 
index 338402b91d2e5654b83c0530030d5aaa8f0a02c2..cef39b0976498ccd6ea0b5dc1dc69acd516e9ea6 100644 (file)
@@ -49,9 +49,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
        return 0;
 }
-void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
-{
-}
 
 void __init efi_map_region(efi_memory_desc_t *md)
 {
index b226b3f497f1c75a465fa73f5171a695ea5c9e95..3e12c44f88a256191c3a58513fb52e237fd7f48b 100644 (file)
@@ -285,11 +285,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
        return 0;
 }
 
-void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
-{
-       kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
-}
-
 static void __init __map_region(efi_memory_desc_t *md, u64 va)
 {
        unsigned long flags = _PAGE_RW;
@@ -466,22 +461,17 @@ extern efi_status_t efi64_thunk(u32, ...);
 #define efi_thunk(f, ...)                                              \
 ({                                                                     \
        efi_status_t __s;                                               \
-       unsigned long flags;                                            \
-       u32 func;                                                       \
-                                                                       \
-       efi_sync_low_kernel_mappings();                                 \
-       local_irq_save(flags);                                          \
+       unsigned long __flags;                                          \
+       u32 __func;                                                     \
                                                                        \
-       efi_scratch.prev_cr3 = read_cr3();                              \
-       write_cr3((unsigned long)efi_scratch.efi_pgt);                  \
-       __flush_tlb_all();                                              \
+       local_irq_save(__flags);                                        \
+       arch_efi_call_virt_setup();                                     \
                                                                        \
-       func = runtime_service32(f);                                    \
-       __s = efi64_thunk(func, __VA_ARGS__);                   \
+       __func = runtime_service32(f);                                  \
+       __s = efi64_thunk(__func, __VA_ARGS__);                         \
                                                                        \
-       write_cr3(efi_scratch.prev_cr3);                                \
-       __flush_tlb_all();                                              \
-       local_irq_restore(flags);                                       \
+       arch_efi_call_virt_teardown();                                  \
+       local_irq_restore(__flags);                                     \
                                                                        \
        __s;                                                            \
 })
index 0ce1b1913673fa9ea259013471955c1f994d829c..fa021dfab0882122189b51d15d32df8c7f503849 100644 (file)
@@ -1,4 +1,4 @@
-obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfl.o
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfld.o pwr.o
 
 # SFI specific code
 ifdef CONFIG_X86_INTEL_MID
index 91ec9f8704bfd418683f1fe1c760c10da6d10afe..fc135bf70511a6a4cd2a877ed696f907e2bad805 100644 (file)
@@ -1,3 +1,5 @@
+# Family-Level Interface Shim (FLIS)
+obj-$(subst m,y,$(CONFIG_PINCTRL_MERRIFIELD)) += platform_mrfld_pinctrl.o
 # IPC Devices
 obj-y += platform_ipc.o
 obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic.o
@@ -8,14 +10,18 @@ obj-$(subst m,y,$(CONFIG_MFD_INTEL_MSIC)) += platform_msic_battery.o
 obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
 obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
 obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
+# SPI Devices
+obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
 # I2C Devices
 obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
 obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
-obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_max7315.o
 obj-$(subst m,y,$(CONFIG_INPUT_MPU3050)) += platform_mpu3050.o
 obj-$(subst m,y,$(CONFIG_INPUT_BMA150)) += platform_bma023.o
-obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
 obj-$(subst m,y,$(CONFIG_DRM_MEDFIELD)) += platform_tc35876x.o
+# I2C GPIO Expanders
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_max7315.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
+obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
 # MISC Devices
 obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
 obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_pinctrl.c
new file mode 100644 (file)
index 0000000..4de8a66
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Intel Merrifield FLIS platform device initialization file
+ *
+ * Copyright (C) 2016, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+
+#include <asm/intel-mid.h>
+
+#define FLIS_BASE_ADDR                 0xff0c0000
+#define FLIS_LENGTH                    0x8000
+
+static struct resource mrfld_pinctrl_mmio_resource = {
+       .start          = FLIS_BASE_ADDR,
+       .end            = FLIS_BASE_ADDR + FLIS_LENGTH - 1,
+       .flags          = IORESOURCE_MEM,
+};
+
+static struct platform_device mrfld_pinctrl_device = {
+       .name           = "pinctrl-merrifield",
+       .id             = PLATFORM_DEVID_NONE,
+       .resource       = &mrfld_pinctrl_mmio_resource,
+       .num_resources  = 1,
+};
+
+static int __init mrfld_pinctrl_init(void)
+{
+       if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+               return platform_device_register(&mrfld_pinctrl_device);
+
+       return -ENODEV;
+}
+arch_initcall(mrfld_pinctrl_init);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
new file mode 100644 (file)
index 0000000..429a941
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * PCAL9555a platform data initialization file
+ *
+ * Copyright (C) 2016, Intel Corporation
+ *
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *         Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/platform_data/pca953x.h>
+#include <linux/sfi.h>
+
+#include <asm/intel-mid.h>
+
+#define PCAL9555A_NUM  4
+
+static struct pca953x_platform_data pcal9555a_pdata[PCAL9555A_NUM];
+static int nr;
+
+static void __init *pcal9555a_platform_data(void *info)
+{
+       struct i2c_board_info *i2c_info = info;
+       char *type = i2c_info->type;
+       struct pca953x_platform_data *pcal9555a;
+       char base_pin_name[SFI_NAME_LEN + 1];
+       char intr_pin_name[SFI_NAME_LEN + 1];
+       int gpio_base, intr;
+
+       snprintf(base_pin_name, sizeof(base_pin_name), "%s_base", type);
+       snprintf(intr_pin_name, sizeof(intr_pin_name), "%s_int", type);
+
+       gpio_base = get_gpio_by_name(base_pin_name);
+       intr = get_gpio_by_name(intr_pin_name);
+
+       /* Check if the SFI record is valid */
+       if (gpio_base == -1)
+               return NULL;
+
+       if (nr >= PCAL9555A_NUM) {
+               pr_err("%s: Too many instances, only %d supported\n", __func__,
+                      PCAL9555A_NUM);
+               return NULL;
+       }
+
+       pcal9555a = &pcal9555a_pdata[nr++];
+       pcal9555a->gpio_base = gpio_base;
+
+       if (intr >= 0) {
+               i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
+               pcal9555a->irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
+       } else {
+               i2c_info->irq = -1;
+               pcal9555a->irq_base = -1;
+       }
+
+       strcpy(type, "pcal9555a");
+       return pcal9555a;
+}
+
+static const struct devs_id pcal9555a_1_dev_id __initconst = {
+       .name                   = "pcal9555a-1",
+       .type                   = SFI_DEV_TYPE_I2C,
+       .delay                  = 1,
+       .get_platform_data      = &pcal9555a_platform_data,
+};
+
+static const struct devs_id pcal9555a_2_dev_id __initconst = {
+       .name                   = "pcal9555a-2",
+       .type                   = SFI_DEV_TYPE_I2C,
+       .delay                  = 1,
+       .get_platform_data      = &pcal9555a_platform_data,
+};
+
+static const struct devs_id pcal9555a_3_dev_id __initconst = {
+       .name                   = "pcal9555a-3",
+       .type                   = SFI_DEV_TYPE_I2C,
+       .delay                  = 1,
+       .get_platform_data      = &pcal9555a_platform_data,
+};
+
+static const struct devs_id pcal9555a_4_dev_id __initconst = {
+       .name                   = "pcal9555a-4",
+       .type                   = SFI_DEV_TYPE_I2C,
+       .delay                  = 1,
+       .get_platform_data      = &pcal9555a_platform_data,
+};
+
+sfi_device(pcal9555a_1_dev_id);
+sfi_device(pcal9555a_2_dev_id);
+sfi_device(pcal9555a_3_dev_id);
+sfi_device(pcal9555a_4_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
new file mode 100644 (file)
index 0000000..30c601b
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * spidev platform data initialization file
+ *
+ * (C) Copyright 2014, 2016 Intel Corporation
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *         Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
+
+#include <asm/intel-mid.h>
+
+#define MRFLD_SPI_DEFAULT_DMA_BURST    8
+#define MRFLD_SPI_DEFAULT_TIMEOUT      500
+
+/* GPIO pin for spidev chipselect */
+#define MRFLD_SPIDEV_GPIO_CS           111
+
+static struct pxa2xx_spi_chip spidev_spi_chip = {
+       .dma_burst_size         = MRFLD_SPI_DEFAULT_DMA_BURST,
+       .timeout                = MRFLD_SPI_DEFAULT_TIMEOUT,
+       .gpio_cs                = MRFLD_SPIDEV_GPIO_CS,
+};
+
+static void __init *spidev_platform_data(void *info)
+{
+       struct spi_board_info *spi_info = info;
+
+       spi_info->mode = SPI_MODE_0;
+       spi_info->controller_data = &spidev_spi_chip;
+
+       return NULL;
+}
+
+static const struct devs_id spidev_dev_id __initconst = {
+       .name                   = "spidev",
+       .type                   = SFI_DEV_TYPE_SPI,
+       .delay                  = 0,
+       .get_platform_data      = &spidev_platform_data,
+};
+
+sfi_device(spidev_dev_id);
index 90bb997ed0a21889fd95482bfe298862b46eaacf..abbf49c6e9d3525d9188ba7f13ef0bdfa740ff32 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/regulator/machine.h>
 #include <linux/scatterlist.h>
 #include <linux/sfi.h>
 #include <linux/irq.h>
@@ -144,6 +145,15 @@ static void intel_mid_arch_setup(void)
 out:
        if (intel_mid_ops->arch_setup)
                intel_mid_ops->arch_setup();
+
+       /*
+        * Intel MID platforms are using explicitly defined regulators.
+        *
+        * Let the regulator core know that we do not have any additional
+        * regulators left. This lets it substitute unprovided regulators with
+        * dummy ones:
+        */
+       regulator_has_full_constraints();
 }
 
 /* MID systems don't have i8042 controller */
similarity index 97%
rename from arch/x86/platform/intel-mid/mrfl.c
rename to arch/x86/platform/intel-mid/mrfld.c
index bd1adc621781117b8044e555509239ee4328247f..59253db41bbc900d238e361b9fce339161be1753 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * mrfl.c: Intel Merrifield platform specific setup code
+ * Intel Merrifield platform specific setup code
  *
  * (C) Copyright 2013 Intel Corporation
  *
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
new file mode 100644 (file)
index 0000000..5bc90dd
--- /dev/null
@@ -0,0 +1,418 @@
+/*
+ * Intel MID Power Management Unit (PWRMU) device driver
+ *
+ * Copyright (C) 2016, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Intel MID Power Management Unit device driver handles the South Complex PCI
+ * devices such as GPDMA, SPI, I2C, PWM, and so on. By default PCI core
+ * modifies bits in PMCSR register in the PCI configuration space. This is not
+ * enough on some SoCs like Intel Tangier. In such case PCI core sets a new
+ * power state of the device in question through a PM hook registered in struct
+ * pci_platform_pm_ops (see drivers/pci/pci-mid.c).
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+#include <asm/intel-mid.h>
+
+/* Registers */
+#define PM_STS                 0x00
+#define PM_CMD                 0x04
+#define PM_ICS                 0x08
+#define PM_WKC(x)              (0x10 + (x) * 4)
+#define PM_WKS(x)              (0x18 + (x) * 4)
+#define PM_SSC(x)              (0x20 + (x) * 4)
+#define PM_SSS(x)              (0x30 + (x) * 4)
+
+/* Bits in PM_STS */
+#define PM_STS_BUSY            (1 << 8)
+
+/* Bits in PM_CMD */
+#define PM_CMD_CMD(x)          ((x) << 0)
+#define PM_CMD_IOC             (1 << 8)
+#define PM_CMD_D3cold          (1 << 21)
+
+/* List of commands */
+#define CMD_SET_CFG            0x01
+
+/* Bits in PM_ICS */
+#define PM_ICS_INT_STATUS(x)   ((x) & 0xff)
+#define PM_ICS_IE              (1 << 8)
+#define PM_ICS_IP              (1 << 9)
+#define PM_ICS_SW_INT_STS      (1 << 10)
+
+/* List of interrupts */
+#define INT_INVALID            0
+#define INT_CMD_COMPLETE       1
+#define INT_CMD_ERR            2
+#define INT_WAKE_EVENT         3
+#define INT_LSS_POWER_ERR      4
+#define INT_S0iX_MSG_ERR       5
+#define INT_NO_C6              6
+#define INT_TRIGGER_ERR                7
+#define INT_INACTIVITY         8
+
+/* South Complex devices */
+#define LSS_MAX_SHARED_DEVS    4
+#define LSS_MAX_DEVS           64
+
+#define LSS_WS_BITS            1       /* wake state width */
+#define LSS_PWS_BITS           2       /* power state width */
+
+/* Supported device IDs */
+#define PCI_DEVICE_ID_PENWELL  0x0828
+#define PCI_DEVICE_ID_TANGIER  0x11a1
+
+struct mid_pwr_dev {
+       struct pci_dev *pdev;
+       pci_power_t state;
+};
+
+struct mid_pwr {
+       struct device *dev;
+       void __iomem *regs;
+       int irq;
+       bool available;
+
+       struct mutex lock;
+       struct mid_pwr_dev lss[LSS_MAX_DEVS][LSS_MAX_SHARED_DEVS];
+};
+
+static struct mid_pwr *midpwr;
+
+static u32 mid_pwr_get_state(struct mid_pwr *pwr, int reg)
+{
+       return readl(pwr->regs + PM_SSS(reg));
+}
+
+static void mid_pwr_set_state(struct mid_pwr *pwr, int reg, u32 value)
+{
+       writel(value, pwr->regs + PM_SSC(reg));
+}
+
+static void mid_pwr_set_wake(struct mid_pwr *pwr, int reg, u32 value)
+{
+       writel(value, pwr->regs + PM_WKC(reg));
+}
+
+static void mid_pwr_interrupt_disable(struct mid_pwr *pwr)
+{
+       writel(~PM_ICS_IE, pwr->regs + PM_ICS);
+}
+
+static bool mid_pwr_is_busy(struct mid_pwr *pwr)
+{
+       return !!(readl(pwr->regs + PM_STS) & PM_STS_BUSY);
+}
+
+/* Wait up to 500ms for the latest PWRMU command to finish */
+static int mid_pwr_wait(struct mid_pwr *pwr)
+{
+       unsigned int count = 500000;
+       bool busy;
+
+       do {
+               busy = mid_pwr_is_busy(pwr);
+               if (!busy)
+                       return 0;
+               udelay(1);
+       } while (--count);
+
+       return -EBUSY;
+}
+
+static int mid_pwr_wait_for_cmd(struct mid_pwr *pwr, u8 cmd)
+{
+       writel(PM_CMD_CMD(cmd), pwr->regs + PM_CMD);
+       return mid_pwr_wait(pwr);
+}
+
+static int __update_power_state(struct mid_pwr *pwr, int reg, int bit, int new)
+{
+       int curstate;
+       u32 power;
+       int ret;
+
+       /* Check if the device is already in desired state */
+       power = mid_pwr_get_state(pwr, reg);
+       curstate = (power >> bit) & 3;
+       if (curstate == new)
+               return 0;
+
+       /* Update the power state */
+       mid_pwr_set_state(pwr, reg, (power & ~(3 << bit)) | (new << bit));
+
+       /* Send command to SCU */
+       ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
+       if (ret)
+               return ret;
+
+       /* Check if the device is already in desired state */
+       power = mid_pwr_get_state(pwr, reg);
+       curstate = (power >> bit) & 3;
+       if (curstate != new)
+               return -EAGAIN;
+
+       return 0;
+}
+
+static pci_power_t __find_weakest_power_state(struct mid_pwr_dev *lss,
+                                             struct pci_dev *pdev,
+                                             pci_power_t state)
+{
+       pci_power_t weakest = PCI_D3hot;
+       unsigned int j;
+
+       /* Find device in cache or first free cell */
+       for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
+               if (lss[j].pdev == pdev || !lss[j].pdev)
+                       break;
+       }
+
+       /* Store the desired state in cache */
+       if (j < LSS_MAX_SHARED_DEVS) {
+               lss[j].pdev = pdev;
+               lss[j].state = state;
+       } else {
+               dev_WARN(&pdev->dev, "No room for device in PWRMU LSS cache\n");
+               weakest = state;
+       }
+
+       /* Find the power state we may use */
+       for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
+               if (lss[j].state < weakest)
+                       weakest = lss[j].state;
+       }
+
+       return weakest;
+}
+
+static int __set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
+                            pci_power_t state, int id, int reg, int bit)
+{
+       const char *name;
+       int ret;
+
+       state = __find_weakest_power_state(pwr->lss[id], pdev, state);
+       name = pci_power_name(state);
+
+       ret = __update_power_state(pwr, reg, bit, (__force int)state);
+       if (ret) {
+               dev_warn(&pdev->dev, "Can't set power state %s: %d\n", name, ret);
+               return ret;
+       }
+
+       dev_vdbg(&pdev->dev, "Set power state %s\n", name);
+       return 0;
+}
+
+static int mid_pwr_set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
+                                  pci_power_t state)
+{
+       int id, reg, bit;
+       int ret;
+
+       id = intel_mid_pwr_get_lss_id(pdev);
+       if (id < 0)
+               return id;
+
+       reg = (id * LSS_PWS_BITS) / 32;
+       bit = (id * LSS_PWS_BITS) % 32;
+
+       /* We support states between PCI_D0 and PCI_D3hot */
+       if (state < PCI_D0)
+               state = PCI_D0;
+       if (state > PCI_D3hot)
+               state = PCI_D3hot;
+
+       mutex_lock(&pwr->lock);
+       ret = __set_power_state(pwr, pdev, state, id, reg, bit);
+       mutex_unlock(&pwr->lock);
+       return ret;
+}
+
+int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+       struct mid_pwr *pwr = midpwr;
+       int ret = 0;
+
+       might_sleep();
+
+       if (pwr && pwr->available)
+               ret = mid_pwr_set_power_state(pwr, pdev, state);
+       dev_vdbg(&pdev->dev, "set_power_state() returns %d\n", ret);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
+
+int intel_mid_pwr_get_lss_id(struct pci_dev *pdev)
+{
+       int vndr;
+       u8 id;
+
+       /*
+        * Mapping to PWRMU index is kept in the Logical SubSystem ID byte of
+        * Vendor capability.
+        */
+       vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+       if (!vndr)
+               return -EINVAL;
+
+       /* Read the Logical SubSystem ID byte */
+       pci_read_config_byte(pdev, vndr + INTEL_MID_PWR_LSS_OFFSET, &id);
+       if (!(id & INTEL_MID_PWR_LSS_TYPE))
+               return -ENODEV;
+
+       id &= ~INTEL_MID_PWR_LSS_TYPE;
+       if (id >= LSS_MAX_DEVS)
+               return -ERANGE;
+
+       return id;
+}
+
+static irqreturn_t mid_pwr_irq_handler(int irq, void *dev_id)
+{
+       struct mid_pwr *pwr = dev_id;
+       u32 ics;
+
+       ics = readl(pwr->regs + PM_ICS);
+       if (!(ics & PM_ICS_IP))
+               return IRQ_NONE;
+
+       writel(ics | PM_ICS_IP, pwr->regs + PM_ICS);
+
+       dev_warn(pwr->dev, "Unexpected IRQ: %#x\n", PM_ICS_INT_STATUS(ics));
+       return IRQ_HANDLED;
+}
+
+struct mid_pwr_device_info {
+       int (*set_initial_state)(struct mid_pwr *pwr);
+};
+
+static int mid_pwr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct mid_pwr_device_info *info = (void *)id->driver_data;
+       struct device *dev = &pdev->dev;
+       struct mid_pwr *pwr;
+       int ret;
+
+       ret = pcim_enable_device(pdev);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "error: could not enable device\n");
+               return ret;
+       }
+
+       ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
+       if (ret) {
+               dev_err(&pdev->dev, "I/O memory remapping failed\n");
+               return ret;
+       }
+
+       pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
+       if (!pwr)
+               return -ENOMEM;
+
+       pwr->dev = dev;
+       pwr->regs = pcim_iomap_table(pdev)[0];
+       pwr->irq = pdev->irq;
+
+       mutex_init(&pwr->lock);
+
+       /* Disable interrupts */
+       mid_pwr_interrupt_disable(pwr);
+
+       if (info && info->set_initial_state) {
+               ret = info->set_initial_state(pwr);
+               if (ret)
+                       dev_warn(dev, "Can't set initial state: %d\n", ret);
+       }
+
+       ret = devm_request_irq(dev, pdev->irq, mid_pwr_irq_handler,
+                              IRQF_NO_SUSPEND, pci_name(pdev), pwr);
+       if (ret)
+               return ret;
+
+       pwr->available = true;
+       midpwr = pwr;
+
+       pci_set_drvdata(pdev, pwr);
+       return 0;
+}
+
+static int mid_set_initial_state(struct mid_pwr *pwr)
+{
+       unsigned int i, j;
+       int ret;
+
+       /*
+        * Enable wake events.
+        *
+        * PWRMU supports up to 32 sources for waking up the system. Ungate them
+        * all here.
+        */
+       mid_pwr_set_wake(pwr, 0, 0xffffffff);
+       mid_pwr_set_wake(pwr, 1, 0xffffffff);
+
+       /*
+        * Power off South Complex devices.
+        *
+        * There is a map (see a note below) of 64 devices with 2 bits per device
+        * on 32-bit HW registers. The following calls set all devices to one
+        * known initial state, i.e. PCI_D3hot. This is done in conjunction
+        * with PMCSR setting in arch/x86/pci/intel_mid_pci.c.
+        *
+        * NOTE: The actual device mapping is provided by a platform at run
+        * time using vendor capability of PCI configuration space.
+        */
+       mid_pwr_set_state(pwr, 0, 0xffffffff);
+       mid_pwr_set_state(pwr, 1, 0xffffffff);
+       mid_pwr_set_state(pwr, 2, 0xffffffff);
+       mid_pwr_set_state(pwr, 3, 0xffffffff);
+
+       /* Send command to SCU */
+       ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < LSS_MAX_DEVS; i++) {
+               for (j = 0; j < LSS_MAX_SHARED_DEVS; j++)
+                       pwr->lss[i][j].state = PCI_D3hot;
+       }
+
+       return 0;
+}
+
+static const struct mid_pwr_device_info mid_info = {
+       .set_initial_state = mid_set_initial_state,
+};
+
+static const struct pci_device_id mid_pwr_pci_ids[] = {
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL), (kernel_ulong_t)&mid_info },
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&mid_info },
+       {}
+};
+MODULE_DEVICE_TABLE(pci, mid_pwr_pci_ids);
+
+static struct pci_driver mid_pwr_pci_driver = {
+       .name           = "intel_mid_pwr",
+       .probe          = mid_pwr_probe,
+       .id_table       = mid_pwr_pci_ids,
+};
+
+builtin_pci_driver(mid_pwr_pci_driver);
index 5ee360a951ce7d59c74738491c8711f2f8fc1446..1555672d436f364d9133d46508cee68c11e222ed 100644 (file)
@@ -407,6 +407,32 @@ static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry,
                i2c_register_board_info(pentry->host_num, &i2c_info, 1);
 }
 
+static void __init sfi_handle_sd_dev(struct sfi_device_table_entry *pentry,
+                                       struct devs_id *dev)
+{
+       struct mid_sd_board_info sd_info;
+       void *pdata;
+
+       memset(&sd_info, 0, sizeof(sd_info));
+       strncpy(sd_info.name, pentry->name, SFI_NAME_LEN);
+       sd_info.bus_num = pentry->host_num;
+       sd_info.max_clk = pentry->max_freq;
+       sd_info.addr = pentry->addr;
+       pr_debug("SD bus = %d, name = %16.16s, max_clk = %d, addr = 0x%x\n",
+                sd_info.bus_num,
+                sd_info.name,
+                sd_info.max_clk,
+                sd_info.addr);
+       pdata = intel_mid_sfi_get_pdata(dev, &sd_info);
+       if (IS_ERR(pdata))
+               return;
+
+       /* Nothing we can do with this for now */
+       sd_info.platform_data = pdata;
+
+       pr_debug("Successfully registered %16.16s", sd_info.name);
+}
+
 extern struct devs_id *const __x86_intel_mid_dev_start[],
                      *const __x86_intel_mid_dev_end[];
 
@@ -490,6 +516,9 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
                        case SFI_DEV_TYPE_I2C:
                                sfi_handle_i2c_dev(pentry, dev);
                                break;
+                       case SFI_DEV_TYPE_SD:
+                               sfi_handle_sd_dev(pentry, dev);
+                               break;
                        case SFI_DEV_TYPE_UART:
                        case SFI_DEV_TYPE_HSI:
                        default:
index 815fec6e05e2b8801f83c0aa34baef9aeeeb27ef..66b2166ea4a1c715a0362ed99cbeb3692a031476 100644 (file)
@@ -40,8 +40,7 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
                 */
                return BIOS_STATUS_UNIMPLEMENTED;
 
-       ret = efi_call((void *)__va(tab->function), (u64)which,
-                       a1, a2, a3, a4, a5);
+       ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
        return ret;
 }
 EXPORT_SYMBOL_GPL(uv_bios_call);
index 009947d419a61ae2fc41f58ad9d85ece1555fec2..f2b5e6a5cf956102905f64462db824a8a355cca5 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/mtrr.h>
 #include <asm/sections.h>
 #include <asm/suspend.h>
+#include <asm/tlbflush.h>
 
 /* Defined in hibernate_asm_64.S */
 extern asmlinkage __visible int restore_image(void);
@@ -28,6 +29,7 @@ extern asmlinkage __visible int restore_image(void);
  * kernel's text (this value is passed in the image header).
  */
 unsigned long restore_jump_address __visible;
+unsigned long jump_address_phys;
 
 /*
  * Value of the cr3 register from before the hibernation (this value is passed
@@ -37,7 +39,43 @@ unsigned long restore_cr3 __visible;
 
 pgd_t *temp_level4_pgt __visible;
 
-void *relocated_restore_code __visible;
+unsigned long relocated_restore_code __visible;
+
+static int set_up_temporary_text_mapping(void)
+{
+       pmd_t *pmd;
+       pud_t *pud;
+
+       /*
+        * The new mapping only has to cover the page containing the image
+        * kernel's entry point (jump_address_phys), because the switch over to
+        * it is carried out by relocated code running from a page allocated
+        * specifically for this purpose and covered by the identity mapping, so
+        * the temporary kernel text mapping is only needed for the final jump.
+        * Moreover, in that mapping the virtual address of the image kernel's
+        * entry point must be the same as its virtual address in the image
+        * kernel (restore_jump_address), so the image kernel's
+        * restore_registers() code doesn't find itself in a different area of
+        * the virtual address space after switching over to the original page
+        * tables used by the image kernel.
+        */
+       pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+       if (!pud)
+               return -ENOMEM;
+
+       pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+       if (!pmd)
+               return -ENOMEM;
+
+       set_pmd(pmd + pmd_index(restore_jump_address),
+               __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
+       set_pud(pud + pud_index(restore_jump_address),
+               __pud(__pa(pmd) | _KERNPG_TABLE));
+       set_pgd(temp_level4_pgt + pgd_index(restore_jump_address),
+               __pgd(__pa(pud) | _KERNPG_TABLE));
+
+       return 0;
+}
 
 static void *alloc_pgt_page(void *context)
 {
@@ -59,9 +97,10 @@ static int set_up_temporary_mappings(void)
        if (!temp_level4_pgt)
                return -ENOMEM;
 
-       /* It is safe to reuse the original kernel mapping */
-       set_pgd(temp_level4_pgt + pgd_index(__START_KERNEL_map),
-               init_level4_pgt[pgd_index(__START_KERNEL_map)]);
+       /* Prepare a temporary mapping for the kernel text */
+       result = set_up_temporary_text_mapping();
+       if (result)
+               return result;
 
        /* Set up the direct mapping from scratch */
        for (i = 0; i < nr_pfn_mapped; i++) {
@@ -78,19 +117,50 @@ static int set_up_temporary_mappings(void)
        return 0;
 }
 
+static int relocate_restore_code(void)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+
+       relocated_restore_code = get_safe_page(GFP_ATOMIC);
+       if (!relocated_restore_code)
+               return -ENOMEM;
+
+       memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);
+
+       /* Make the page containing the relocated code executable */
+       pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
+       pud = pud_offset(pgd, relocated_restore_code);
+       if (pud_large(*pud)) {
+               set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
+       } else {
+               pmd_t *pmd = pmd_offset(pud, relocated_restore_code);
+
+               if (pmd_large(*pmd)) {
+                       set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
+               } else {
+                       pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);
+
+                       set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
+               }
+       }
+       __flush_tlb_all();
+
+       return 0;
+}
+
 int swsusp_arch_resume(void)
 {
        int error;
 
        /* We have got enough memory and from now on we cannot recover */
-       if ((error = set_up_temporary_mappings()))
+       error = set_up_temporary_mappings();
+       if (error)
                return error;
 
-       relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
-       if (!relocated_restore_code)
-               return -ENOMEM;
-       memcpy(relocated_restore_code, &core_restore_code,
-              &restore_registers - &core_restore_code);
+       error = relocate_restore_code();
+       if (error)
+               return error;
 
        restore_image();
        return 0;
@@ -109,11 +179,12 @@ int pfn_is_nosave(unsigned long pfn)
 
 struct restore_data_record {
        unsigned long jump_address;
+       unsigned long jump_address_phys;
        unsigned long cr3;
        unsigned long magic;
 };
 
-#define RESTORE_MAGIC  0x0123456789ABCDEFUL
+#define RESTORE_MAGIC  0x123456789ABCDEF0UL
 
 /**
  *     arch_hibernation_header_save - populate the architecture specific part
@@ -126,7 +197,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 
        if (max_size < sizeof(struct restore_data_record))
                return -EOVERFLOW;
-       rdr->jump_address = restore_jump_address;
+       rdr->jump_address = (unsigned long)&restore_registers;
+       rdr->jump_address_phys = __pa_symbol(&restore_registers);
        rdr->cr3 = restore_cr3;
        rdr->magic = RESTORE_MAGIC;
        return 0;
@@ -142,6 +214,7 @@ int arch_hibernation_header_restore(void *addr)
        struct restore_data_record *rdr = addr;
 
        restore_jump_address = rdr->jump_address;
+       jump_address_phys = rdr->jump_address_phys;
        restore_cr3 = rdr->cr3;
        return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
 }
index 4400a43b9e28f20aaac68c1a20091e683e31b799..3177c2bc26f63e9fe1bb82739be9c0c2e406f00f 100644 (file)
@@ -44,9 +44,6 @@ ENTRY(swsusp_arch_suspend)
        pushfq
        popq    pt_regs_flags(%rax)
 
-       /* save the address of restore_registers */
-       movq    $restore_registers, %rax
-       movq    %rax, restore_jump_address(%rip)
        /* save cr3 */
        movq    %cr3, %rax
        movq    %rax, restore_cr3(%rip)
@@ -57,31 +54,34 @@ ENTRY(swsusp_arch_suspend)
 ENDPROC(swsusp_arch_suspend)
 
 ENTRY(restore_image)
-       /* switch to temporary page tables */
-       movq    $__PAGE_OFFSET, %rdx
-       movq    temp_level4_pgt(%rip), %rax
-       subq    %rdx, %rax
-       movq    %rax, %cr3
-       /* Flush TLB */
-       movq    mmu_cr4_features(%rip), %rax
-       movq    %rax, %rdx
-       andq    $~(X86_CR4_PGE), %rdx
-       movq    %rdx, %cr4;  # turn off PGE
-       movq    %cr3, %rcx;  # flush TLB
-       movq    %rcx, %cr3;
-       movq    %rax, %cr4;  # turn PGE back on
-
        /* prepare to jump to the image kernel */
-       movq    restore_jump_address(%rip), %rax
-       movq    restore_cr3(%rip), %rbx
+       movq    restore_jump_address(%rip), %r8
+       movq    restore_cr3(%rip), %r9
+
+       /* prepare to switch to temporary page tables */
+       movq    temp_level4_pgt(%rip), %rax
+       movq    mmu_cr4_features(%rip), %rbx
 
        /* prepare to copy image data to their original locations */
        movq    restore_pblist(%rip), %rdx
+
+       /* jump to relocated restore code */
        movq    relocated_restore_code(%rip), %rcx
        jmpq    *%rcx
 
        /* code below has been relocated to a safe page */
 ENTRY(core_restore_code)
+       /* switch to temporary page tables */
+       movq    $__PAGE_OFFSET, %rcx
+       subq    %rcx, %rax
+       movq    %rax, %cr3
+       /* flush TLB */
+       movq    %rbx, %rcx
+       andq    $~(X86_CR4_PGE), %rcx
+       movq    %rcx, %cr4;  # turn off PGE
+       movq    %cr3, %rcx;  # flush TLB
+       movq    %rcx, %cr3;
+       movq    %rbx, %cr4;  # turn PGE back on
 .Lloop:
        testq   %rdx, %rdx
        jz      .Ldone
@@ -96,24 +96,17 @@ ENTRY(core_restore_code)
        /* progress to the next pbe */
        movq    pbe_next(%rdx), %rdx
        jmp     .Lloop
+
 .Ldone:
        /* jump to the restore_registers address from the image header */
-       jmpq    *%rax
-       /*
-        * NOTE: This assumes that the boot kernel's text mapping covers the
-        * image kernel's page containing restore_registers and the address of
-        * this page is the same as in the image kernel's text mapping (it
-        * should always be true, because the text mapping is linear, starting
-        * from 0, and is supposed to cover the entire kernel text for every
-        * kernel).
-        *
-        * code below belongs to the image kernel
-        */
+       jmpq    *%r8
 
+        /* code below belongs to the image kernel */
+       .align PAGE_SIZE
 ENTRY(restore_registers)
        FRAME_BEGIN
        /* go back to the original page tables */
-       movq    %rbx, %cr3
+       movq    %r9, %cr3
 
        /* Flush TLB, including "global" things (vmalloc) */
        movq    mmu_cr4_features(%rip), %rax
index e69f4701a076da1006c85532de3a585822c2578f..1104515d5ad2aadee8711425e089bce3857b8139 100644 (file)
@@ -241,6 +241,31 @@ static void toggle_nb_mca_mst_cpu(u16 nid)
                       __func__, PCI_FUNC(F3->devfn), NBCFG);
 }
 
+static void prepare_msrs(void *info)
+{
+       struct mce i_mce = *(struct mce *)info;
+       u8 b = i_mce.bank;
+
+       wrmsrl(MSR_IA32_MCG_STATUS, i_mce.mcgstatus);
+
+       if (boot_cpu_has(X86_FEATURE_SMCA)) {
+               if (i_mce.inject_flags == DFR_INT_INJ) {
+                       wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), i_mce.status);
+                       wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), i_mce.addr);
+               } else {
+                       wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), i_mce.status);
+                       wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), i_mce.addr);
+               }
+
+               wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), i_mce.misc);
+       } else {
+               wrmsrl(MSR_IA32_MCx_STATUS(b), i_mce.status);
+               wrmsrl(MSR_IA32_MCx_ADDR(b), i_mce.addr);
+               wrmsrl(MSR_IA32_MCx_MISC(b), i_mce.misc);
+       }
+
+}
+
 static void do_inject(void)
 {
        u64 mcg_status = 0;
@@ -287,36 +312,9 @@ static void do_inject(void)
 
        toggle_hw_mce_inject(cpu, true);
 
-       wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS,
-                    (u32)mcg_status, (u32)(mcg_status >> 32));
-
-       if (boot_cpu_has(X86_FEATURE_SMCA)) {
-               if (inj_type == DFR_INT_INJ) {
-                       wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_DESTAT(b),
-                                    (u32)i_mce.status, (u32)(i_mce.status >> 32));
-
-                       wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_DEADDR(b),
-                                    (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-               } else {
-                       wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_STATUS(b),
-                                    (u32)i_mce.status, (u32)(i_mce.status >> 32));
-
-                       wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_ADDR(b),
-                                    (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-               }
-
-               wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(b),
-                            (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
-       } else {
-               wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b),
-                            (u32)i_mce.status, (u32)(i_mce.status >> 32));
-
-               wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b),
-                            (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-
-               wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b),
-                            (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
-       }
+       i_mce.mcgstatus = mcg_status;
+       i_mce.inject_flags = inj_type;
+       smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);
 
        toggle_hw_mce_inject(cpu, false);
 
index 0b7a63d9844038a02492ae5ddf0847b51d51bdd7..705e3fffb4a1a3296ac5745603681cc434c44d92 100644 (file)
@@ -8,6 +8,9 @@
 struct real_mode_header *real_mode_header;
 u32 *trampoline_cr4_features;
 
+/* Hold the pgd entry used on booting additional CPUs */
+pgd_t trampoline_pgd_entry;
+
 void __init reserve_real_mode(void)
 {
        phys_addr_t mem;
@@ -84,7 +87,7 @@ void __init setup_real_mode(void)
        *trampoline_cr4_features = __read_cr4();
 
        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
-       trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
+       trampoline_pgd[0] = trampoline_pgd_entry.pgd;
        trampoline_pgd[511] = init_level4_pgt[511].pgd;
 #endif
 }
index db52a7fafcc2ce7dc9b4294065ef9807f9a311a1..44c88ad1841a3696a7225965f16760820bea938a 100644 (file)
@@ -177,7 +177,6 @@ static struct apic xen_pv_apic = {
 
        .get_apic_id                    = xen_get_apic_id,
        .set_apic_id                    = xen_set_apic_id, /* Can be NULL on 32-bit. */
-       .apic_id_mask                   = 0xFF << 24, /* Used by verify_local_APIC. Match with what xen_get_apic_id does. */
 
        .cpu_mask_to_apicid_and         = flat_cpu_mask_to_apicid_and,
 
index 760789ae8562af21932adc93e56dca10717915ab..0f87db2cc6a88fa765718a91219352832b63fc11 100644 (file)
@@ -521,9 +521,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 
        preempt_disable();
 
-       pagefault_disable();    /* Avoid warnings due to being atomic. */
-       __get_user(dummy, (unsigned char __user __force *)v);
-       pagefault_enable();
+       probe_kernel_read(&dummy, v, 1);
 
        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();
index fd8017ce298afcc54aedae0763e36d71377a02b5..e7a23f2a519af7be68a9db30b4c112fb344efdda 100644 (file)
@@ -98,6 +98,26 @@ static inline int atomic_##op##_return(int i, atomic_t * v)          \
        return result;                                                  \
 }
 
+#define ATOMIC_FETCH_OP(op)                                            \
+static inline int atomic_fetch_##op(int i, atomic_t * v)               \
+{                                                                      \
+       unsigned long tmp;                                              \
+       int result;                                                     \
+                                                                       \
+       __asm__ __volatile__(                                           \
+                       "1:     l32i    %1, %3, 0\n"                    \
+                       "       wsr     %1, scompare1\n"                \
+                       "       " #op " %0, %1, %2\n"                   \
+                       "       s32c1i  %0, %3, 0\n"                    \
+                       "       bne     %0, %1, 1b\n"                   \
+                       : "=&a" (result), "=&a" (tmp)                   \
+                       : "a" (i), "a" (v)                              \
+                       : "memory"                                      \
+                       );                                              \
+                                                                       \
+       return result;                                                  \
+}
+
 #else /* XCHAL_HAVE_S32C1I */
 
 #define ATOMIC_OP(op)                                                  \
@@ -138,18 +158,42 @@ static inline int atomic_##op##_return(int i, atomic_t * v)               \
        return vval;                                                    \
 }
 
+#define ATOMIC_FETCH_OP(op)                                            \
+static inline int atomic_fetch_##op(int i, atomic_t * v)               \
+{                                                                      \
+       unsigned int tmp, vval;                                         \
+                                                                       \
+       __asm__ __volatile__(                                           \
+                       "       rsil    a15,"__stringify(TOPLEVEL)"\n"  \
+                       "       l32i    %0, %3, 0\n"                    \
+                       "       " #op " %1, %0, %2\n"                   \
+                       "       s32i    %1, %3, 0\n"                    \
+                       "       wsr     a15, ps\n"                      \
+                       "       rsync\n"                                \
+                       : "=&a" (vval), "=&a" (tmp)                     \
+                       : "a" (i), "a" (v)                              \
+                       : "a15", "memory"                               \
+                       );                                              \
+                                                                       \
+       return vval;                                                    \
+}
+
 #endif /* XCHAL_HAVE_S32C1I */
 
-#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)
 
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)
+
+ATOMIC_OPS(and)
+ATOMIC_OPS(or)
+ATOMIC_OPS(xor)
 
 #undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index 1d95fa5dcd10f0121f394e22fb108682c17741aa..a36221cf63637fc2e066850e937bc138698af973 100644 (file)
@@ -11,6 +11,9 @@
 #ifndef _XTENSA_SPINLOCK_H
 #define _XTENSA_SPINLOCK_H
 
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
 /*
  * spinlock
  *
  */
 
 #define arch_spin_is_locked(x) ((x)->slock != 0)
-#define arch_spin_unlock_wait(lock) \
-       do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->slock, !VAL);
+}
 
 #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 
index 5f4bd71971d6a317562289f2e9156f60fd07ed02..4904c5c16918c146413d4c00af0febf37b25a0dc 100644 (file)
@@ -113,7 +113,6 @@ void platform_heartbeat(void)
 }
 
 //#define RS_TABLE_SIZE 2
-//#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF|UPF_SKIP_TEST)
 
 #define _SERIAL_PORT(_base,_irq)                                       \
 {                                                                      \
index 9f42526b4d62bca142045d74431b69143adfffef..f06d7f3b075bf202fe34b77fa8fbdd01c67f0ee0 100644 (file)
@@ -1523,12 +1523,7 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
        if (--ev->block)
                goto out_unlock;
 
-       /*
-        * Not exactly a latency critical operation, set poll timer
-        * slack to 25% and kick event check.
-        */
        intv = disk_events_poll_jiffies(disk);
-       set_timer_slack(&ev->dwork.timer, intv / 4);
        if (check_now)
                queue_delayed_work(system_freezable_power_efficient_wq,
                                &ev->dwork, 0);
index cc7800e9eb441e2b7737a152f0dbb60182821408..01b8116298a13b5463e7969ce66c0b037bcfccd5 100644 (file)
@@ -150,8 +150,10 @@ static int get_task_ioprio(struct task_struct *p)
        if (ret)
                goto out;
        ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
+       task_lock(p);
        if (p->io_context)
                ret = p->io_context->ioprio;
+       task_unlock(p);
 out:
        return ret;
 }
index 6a76d5c70ef6e1ddb742bfc94671a499184c891d..9492e1c22d3891417d3e42baf2e99661041c1135 100644 (file)
@@ -124,5 +124,10 @@ int mscode_note_digest(void *context, size_t hdrlen,
        struct pefile_context *ctx = context;
 
        ctx->digest = kmemdup(value, vlen, GFP_KERNEL);
-       return ctx->digest ? 0 : -ENOMEM;
+       if (!ctx->digest)
+               return -ENOMEM;
+
+       ctx->digest_len = vlen;
+
+       return 0;
 }
index 44b746e9df1b4110e6f8da4ac5b23419cb63feac..2ffd69769466082eaf55cdfe71fb67704e0364af 100644 (file)
@@ -227,7 +227,7 @@ static int pkcs7_verify_sig_chain(struct pkcs7_message *pkcs7,
                                if (asymmetric_key_id_same(p->id, auth))
                                        goto found_issuer_check_skid;
                        }
-               } else {
+               } else if (sig->auth_ids[1]) {
                        auth = sig->auth_ids[1];
                        pr_debug("- want %*phN\n", auth->len, auth->data);
                        for (p = pkcs7->certs; p; p = p->next) {
index ac4bddf669de2195bce0864a28308031602245da..19d1afb9890f660e43ee95261cf0a703e44f92c6 100644 (file)
@@ -87,7 +87,7 @@ int restrict_link_by_signature(struct key *trust_keyring,
 
        sig = payload->data[asym_auth];
        if (!sig->auth_ids[0] && !sig->auth_ids[1])
-               return 0;
+               return -ENOKEY;
 
        if (ca_keyid && !asymmetric_key_id_partial(sig->auth_ids[1], ca_keyid))
                return -EPERM;
index ead8dc0d084e749e35733abc164f1e209aca3f6f..8ba426635b1b39631824f7bb2b1f2e3a056bafa4 100644 (file)
@@ -102,10 +102,10 @@ struct pkcs1pad_inst_ctx {
 };
 
 struct pkcs1pad_request {
-       struct akcipher_request child_req;
-
        struct scatterlist in_sg[3], out_sg[2];
        uint8_t *in_buf, *out_buf;
+
+       struct akcipher_request child_req;
 };
 
 static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
index 0b6f3d60193dcc2f2cdfaabe2f37e23a0b8f48a9..a7187b999c5e68cc7f04620718996bae9ce138fe 100644 (file)
@@ -128,7 +128,6 @@ obj-$(CONFIG_SGI_SN)                += sn/
 obj-y                          += firmware/
 obj-$(CONFIG_CRYPTO)           += crypto/
 obj-$(CONFIG_SUPERH)           += sh/
-obj-$(CONFIG_ARCH_SHMOBILE)    += sh/
 ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 obj-y                          += clocksource/
 endif
index 1f4128487dd4be1b342df522e010de6c29c6756b..dee86925a9a107f028ef14adf110eb583500c0c7 100644 (file)
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
        crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
        ret = n;
 out:
-       acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
+       acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
        return ret;
 }
 
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
        crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
        ret = n;
 out:
-       acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
+       acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
        return n;
 }
 
index 0872d5fecb82f2dee893c7feb0ac6a408f903384..357a0b8f860b378850939b7ff83a018881d913d0 100644 (file)
@@ -29,6 +29,7 @@ ACPI_MODULE_NAME("acpi_lpss");
 #ifdef CONFIG_X86_INTEL_LPSS
 
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/iosf_mbi.h>
 #include <asm/pmc_atom.h>
 
@@ -229,8 +230,8 @@ static const struct lpss_device_desc bsw_spi_dev_desc = {
 #define ICPU(model)    { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
 
 static const struct x86_cpu_id lpss_cpu_ids[] = {
-       ICPU(0x37),     /* Valleyview, Bay Trail */
-       ICPU(0x4c),     /* Braswell, Cherry Trail */
+       ICPU(INTEL_FAM6_ATOM_SILVERMONT1),      /* Valleyview, Bay Trail */
+       ICPU(INTEL_FAM6_ATOM_AIRMONT),  /* Braswell, Cherry Trail */
        {}
 };
 
index 21932d640a41f6f5b060b8fcb64fbc2d9ac131ab..a1d177d58254cd8e201836481a51b809ec98c628 100644 (file)
@@ -108,9 +108,7 @@ acpi_ex_add_table(u32 table_index,
 
        /* Add the table to the namespace */
 
-       acpi_ex_exit_interpreter();
        status = acpi_ns_load_table(table_index, parent_node);
-       acpi_ex_enter_interpreter();
        if (ACPI_FAILURE(status)) {
                acpi_ut_remove_reference(obj_desc);
                *ddb_handle = NULL;
index 1783cd7e14467b8f43e9f087884b3b847e6b5492..f631a47724f05332644a54e290d9667b4b326010 100644 (file)
@@ -47,7 +47,6 @@
 #include "acparser.h"
 #include "acdispat.h"
 #include "actables.h"
-#include "acinterp.h"
 
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsparse")
@@ -171,8 +170,6 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 
        ACPI_FUNCTION_TRACE(ns_parse_table);
 
-       acpi_ex_enter_interpreter();
-
        /*
         * AML Parse, pass 1
         *
@@ -188,7 +185,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
        status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
                                            table_index, start_node);
        if (ACPI_FAILURE(status)) {
-               goto error_exit;
+               return_ACPI_STATUS(status);
        }
 
        /*
@@ -204,10 +201,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
        status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
                                            table_index, start_node);
        if (ACPI_FAILURE(status)) {
-               goto error_exit;
+               return_ACPI_STATUS(status);
        }
 
-error_exit:
-       acpi_ex_exit_interpreter();
        return_ACPI_STATUS(status);
 }
index 73c76d646064700dc1bb51905b4dbc70b415aabf..290d6f5be44b414b7d89ec0a48be1b234330c254 100644 (file)
@@ -1331,8 +1331,6 @@ static int ec_install_handlers(struct acpi_ec *ec)
 
 static void ec_remove_handlers(struct acpi_ec *ec)
 {
-       acpi_ec_stop(ec, false);
-
        if (test_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags)) {
                if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
                                        ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
@@ -1340,6 +1338,19 @@ static void ec_remove_handlers(struct acpi_ec *ec)
                clear_bit(EC_FLAGS_EC_HANDLER_INSTALLED, &ec->flags);
        }
 
+       /*
+        * Stops handling the EC transactions after removing the operation
+        * region handler. This is required because _REG(DISCONNECT)
+        * invoked during the removal can result in new EC transactions.
+        *
+        * Flushes the EC requests and thus disables the GPE before
+        * removing the GPE handler. This is required by the current ACPICA
+        * GPE core. ACPICA GPE core will automatically disable a GPE when
+        * it is indicated but there is no way to handle it. So the drivers
+        * must disable the GPEs prior to removing the GPE handlers.
+        */
+       acpi_ec_stop(ec, false);
+
        if (test_bit(EC_FLAGS_GPE_HANDLER_INSTALLED, &ec->flags)) {
                if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
                                        &acpi_ec_gpe_handler)))
index ac6ddcc080d480b65073a2ab8c247f4589ecd99d..1f0e06065ae6c5180d2e8deae426aef0966fa3dd 100644 (file)
@@ -1131,11 +1131,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
 
        /*
         * Until standardization materializes we need to consider up to 3
-        * different command sets.  Note, that checking for zero functions
-        * tells us if any commands might be reachable through this uuid.
+        * different command sets.  Note, that checking for function0 (bit0)
+        * tells us if any commands are reachable through this uuid.
         */
        for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_HPE2; i++)
-               if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 0))
+               if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
                        break;
 
        /* limit the supported commands to those that are publicly documented */
@@ -1151,9 +1151,10 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
                if (disable_vendor_specific)
                        dsm_mask &= ~(1 << 8);
        } else {
-               dev_err(dev, "unknown dimm command family\n");
+               dev_dbg(dev, "unknown dimm command family\n");
                nfit_mem->family = -1;
-               return force_enable_dimms ? 0 : -ENODEV;
+               /* DSMs are optional, continue loading the driver... */
+               return 0;
        }
 
        uuid = to_nfit_uuid(nfit_mem->family);
index 4ed4061813e68fa2bccc266266d433ce6c0e386c..c983bf733ad37d7b608c9410108dcef32cdbf02b 100644 (file)
@@ -470,6 +470,7 @@ static int acpi_irq_pci_sharing_penalty(int irq)
 {
        struct acpi_pci_link *link;
        int penalty = 0;
+       int i;
 
        list_for_each_entry(link, &acpi_link_list, list) {
                /*
@@ -478,18 +479,14 @@ static int acpi_irq_pci_sharing_penalty(int irq)
                 */
                if (link->irq.active && link->irq.active == irq)
                        penalty += PIRQ_PENALTY_PCI_USING;
-               else {
-                       int i;
-
-                       /*
-                        * If a link is inactive, penalize the IRQs it
-                        * might use, but not as severely.
-                        */
-                       for (i = 0; i < link->irq.possible_count; i++)
-                               if (link->irq.possible[i] == irq)
-                                       penalty += PIRQ_PENALTY_PCI_POSSIBLE /
-                                               link->irq.possible_count;
-               }
+
+               /*
+                * penalize the IRQs PCI might use, but not as severely.
+                */
+               for (i = 0; i < link->irq.possible_count; i++)
+                       if (link->irq.possible[i] == irq)
+                               penalty += PIRQ_PENALTY_PCI_POSSIBLE /
+                                       link->irq.possible_count;
        }
 
        return penalty;
@@ -499,9 +496,6 @@ static int acpi_irq_get_penalty(int irq)
 {
        int penalty = 0;
 
-       if (irq < ACPI_MAX_ISA_IRQS)
-               penalty += acpi_isa_irq_penalty[irq];
-
        /*
        * Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict
        * with PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be
@@ -516,10 +510,49 @@ static int acpi_irq_get_penalty(int irq)
                        penalty += PIRQ_PENALTY_PCI_USING;
        }
 
+       if (irq < ACPI_MAX_ISA_IRQS)
+               return penalty + acpi_isa_irq_penalty[irq];
+
        penalty += acpi_irq_pci_sharing_penalty(irq);
        return penalty;
 }
 
+int __init acpi_irq_penalty_init(void)
+{
+       struct acpi_pci_link *link;
+       int i;
+
+       /*
+        * Update penalties to facilitate IRQ balancing.
+        */
+       list_for_each_entry(link, &acpi_link_list, list) {
+
+               /*
+                * reflect the possible and active irqs in the penalty table --
+                * useful for breaking ties.
+                */
+               if (link->irq.possible_count) {
+                       int penalty =
+                           PIRQ_PENALTY_PCI_POSSIBLE /
+                           link->irq.possible_count;
+
+                       for (i = 0; i < link->irq.possible_count; i++) {
+                               if (link->irq.possible[i] < ACPI_MAX_ISA_IRQS)
+                                       acpi_isa_irq_penalty[link->irq.
+                                                        possible[i]] +=
+                                           penalty;
+                       }
+
+               } else if (link->irq.active &&
+                               (link->irq.active < ACPI_MAX_ISA_IRQS)) {
+                       acpi_isa_irq_penalty[link->irq.active] +=
+                           PIRQ_PENALTY_PCI_POSSIBLE;
+               }
+       }
+
+       return 0;
+}
+
 static int acpi_irq_balance = -1;      /* 0: static, 1: balance */
 
 static int acpi_pci_link_allocate(struct acpi_pci_link *link)
index b4de130f2d57419b4979d47d570467c850c04b57..22c09952e177918fdefa400b02a51f483c6fb880 100644 (file)
@@ -680,6 +680,9 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
        u64 mask = 0;
        union acpi_object *obj;
 
+       if (funcs == 0)
+               return false;
+
        obj = acpi_evaluate_dsm(handle, uuid, rev, 0, NULL);
        if (!obj)
                return false;
@@ -692,9 +695,6 @@ bool acpi_check_dsm(acpi_handle handle, const u8 *uuid, u64 rev, u64 funcs)
                        mask |= (((u64)obj->buffer.pointer[i]) << (i * 8));
        ACPI_FREE(obj);
 
-       if (funcs == 0)
-               return true;
-
        /*
         * Bit 0 indicates whether there's support for any functions other than
         * function 0 for the specified UUID and revision.
index 6be7770f68e9a16a2aa8555727358c7cf1df5735..31c183aed368c69c9544c92ecb19853548fb8784 100644 (file)
@@ -4314,6 +4314,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         */
        { "ST380013AS",         "3.20",         ATA_HORKAGE_MAX_SEC_1024 },
 
+       /*
+        * Device times out with higher max sects.
+        * https://bugzilla.kernel.org/show_bug.cgi?id=121671
+        */
+       { "LITEON CX1-JB256-HP", NULL,          ATA_HORKAGE_MAX_SEC_1024 },
+
        /* Devices we expect to fail diagnostics */
 
        /* Devices where NCQ should be avoided */
index eda09090cb523f5ddc2a6c174502110ae8999a57..f642c4264c277bc05d98dc99eb15ac8091886ba5 100644 (file)
@@ -8,8 +8,6 @@
 #include <linux/bcma/bcma.h>
 #include <linux/delay.h>
 
-#define BCMA_CORE_SIZE         0x1000
-
 #define bcma_err(bus, fmt, ...) \
        pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
 #define bcma_warn(bus, fmt, ...) \
index 2e6d1e9c3345f190b01f9d9caf8d27e6f5f8b9ce..fcc5b4e0aef29ed8d5e863e0277687e1abe700e6 100644 (file)
@@ -207,6 +207,9 @@ struct blkfront_info
        struct blk_mq_tag_set tag_set;
        struct blkfront_ring_info *rinfo;
        unsigned int nr_rings;
+       /* Save uncomplete reqs and bios for migration. */
+       struct list_head requests;
+       struct bio_list bio_list;
 };
 
 static unsigned int nr_minors;
@@ -2002,69 +2005,22 @@ static int blkif_recover(struct blkfront_info *info)
 {
        unsigned int i, r_index;
        struct request *req, *n;
-       struct blk_shadow *copy;
        int rc;
        struct bio *bio, *cloned_bio;
-       struct bio_list bio_list, merge_bio;
        unsigned int segs, offset;
        int pending, size;
        struct split_bio *split_bio;
-       struct list_head requests;
 
        blkfront_gather_backend_features(info);
        segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
        blk_queue_max_segments(info->rq, segs);
-       bio_list_init(&bio_list);
-       INIT_LIST_HEAD(&requests);
 
        for (r_index = 0; r_index < info->nr_rings; r_index++) {
-               struct blkfront_ring_info *rinfo;
-
-               rinfo = &info->rinfo[r_index];
-               /* Stage 1: Make a safe copy of the shadow state. */
-               copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
-                              GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
-               if (!copy)
-                       return -ENOMEM;
-
-               /* Stage 2: Set up free list. */
-               memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
-               for (i = 0; i < BLK_RING_SIZE(info); i++)
-                       rinfo->shadow[i].req.u.rw.id = i+1;
-               rinfo->shadow_free = rinfo->ring.req_prod_pvt;
-               rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+               struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
 
                rc = blkfront_setup_indirect(rinfo);
-               if (rc) {
-                       kfree(copy);
+               if (rc)
                        return rc;
-               }
-
-               for (i = 0; i < BLK_RING_SIZE(info); i++) {
-                       /* Not in use? */
-                       if (!copy[i].request)
-                               continue;
-
-                       /*
-                        * Get the bios in the request so we can re-queue them.
-                        */
-                       if (copy[i].request->cmd_flags &
-                           (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
-                               /*
-                                * Flush operations don't contain bios, so
-                                * we need to requeue the whole request
-                                */
-                               list_add(&copy[i].request->queuelist, &requests);
-                               continue;
-                       }
-                       merge_bio.head = copy[i].request->bio;
-                       merge_bio.tail = copy[i].request->biotail;
-                       bio_list_merge(&bio_list, &merge_bio);
-                       copy[i].request->bio = NULL;
-                       blk_end_request_all(copy[i].request, 0);
-               }
-
-               kfree(copy);
        }
        xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
@@ -2079,7 +2035,7 @@ static int blkif_recover(struct blkfront_info *info)
                kick_pending_request_queues(rinfo);
        }
 
-       list_for_each_entry_safe(req, n, &requests, queuelist) {
+       list_for_each_entry_safe(req, n, &info->requests, queuelist) {
                /* Requeue pending requests (flush or discard) */
                list_del_init(&req->queuelist);
                BUG_ON(req->nr_phys_segments > segs);
@@ -2087,7 +2043,7 @@ static int blkif_recover(struct blkfront_info *info)
        }
        blk_mq_kick_requeue_list(info->rq);
 
-       while ((bio = bio_list_pop(&bio_list)) != NULL) {
+       while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
                /* Traverse the list of pending bios and re-queue them */
                if (bio_segments(bio) > segs) {
                        /*
@@ -2133,9 +2089,42 @@ static int blkfront_resume(struct xenbus_device *dev)
 {
        struct blkfront_info *info = dev_get_drvdata(&dev->dev);
        int err = 0;
+       unsigned int i, j;
 
        dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
 
+       bio_list_init(&info->bio_list);
+       INIT_LIST_HEAD(&info->requests);
+       for (i = 0; i < info->nr_rings; i++) {
+               struct blkfront_ring_info *rinfo = &info->rinfo[i];
+               struct bio_list merge_bio;
+               struct blk_shadow *shadow = rinfo->shadow;
+
+               for (j = 0; j < BLK_RING_SIZE(info); j++) {
+                       /* Not in use? */
+                       if (!shadow[j].request)
+                               continue;
+
+                       /*
+                        * Get the bios in the request so we can re-queue them.
+                        */
+                       if (shadow[j].request->cmd_flags &
+                                       (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+                               /*
+                                * Flush operations don't contain bios, so
+                                * we need to requeue the whole request
+                                */
+                               list_add(&shadow[j].request->queuelist, &info->requests);
+                               continue;
+                       }
+                       merge_bio.head = shadow[j].request->bio;
+                       merge_bio.tail = shadow[j].request->biotail;
+                       bio_list_merge(&info->bio_list, &merge_bio);
+                       shadow[j].request->bio = NULL;
+                       blk_mq_end_request(shadow[j].request, 0);
+               }
+       }
+
        blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
 
        err = negotiate_mq(info);
index 8bf70e8c3f790635328fbd79ae75331701c4f1a0..50aa9ba91f255c6a99f1b385ecd065cbc5541d24 100644 (file)
@@ -325,7 +325,7 @@ static long dsp56k_ioctl(struct file *file, unsigned int cmd,
                        if(get_user(bin, &binary->bin) < 0)
                                return -EFAULT;
                
-                       if (len == 0) {
+                       if (len <= 0) {
                                return -EINVAL;      /* nothing to upload?!? */
                        }
                        if (len > DSP56K_MAX_BINARY_LENGTH) {
index 71025c2f6bbb072ff27b1d21a4fdca544b5a2e97..d633974e7f8b1d82914743e0a27bda495e1ff5af 100644 (file)
@@ -66,12 +66,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        u64 cursor = from;
 
        while (cursor < to) {
-               if (!devmem_is_allowed(pfn)) {
-                       printk(KERN_INFO
-               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
-                               current->comm, from, to);
+               if (!devmem_is_allowed(pfn))
                        return 0;
-               }
                cursor += PAGE_SIZE;
                pfn++;
        }
index 10f846cc8db172c5491ddc2508f7084ac5ef5e71..25d5906640c365fe48c618361cb23c98e6a69897 100644 (file)
@@ -99,7 +99,7 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index)
        struct clk_programmable *prog = to_clk_programmable(hw);
        const struct clk_programmable_layout *layout = prog->layout;
        unsigned int mask = layout->css_mask;
-       unsigned int pckr = 0;
+       unsigned int pckr = index;
 
        if (layout->have_slck_mck)
                mask |= AT91_PMC_CSSMCK_MCK;
index 445a7498d6df8f590ef14f447e39dc3e72392a77..9780fac6d029b9b1b85a03ce3d489b31be598281 100644 (file)
@@ -33,6 +33,8 @@ struct sun4i_a10_display_clk_data {
 
        u8      width_div;
        u8      width_mux;
+
+       u32     flags;
 };
 
 struct reset_data {
@@ -166,7 +168,7 @@ static void __init sun4i_a10_display_init(struct device_node *node,
                                     data->has_div ? &div->hw : NULL,
                                     data->has_div ? &clk_divider_ops : NULL,
                                     &gate->hw, &clk_gate_ops,
-                                    0);
+                                    data->flags);
        if (IS_ERR(clk)) {
                pr_err("%s: Couldn't register the clock\n", clk_name);
                goto free_div;
@@ -232,6 +234,7 @@ static const struct sun4i_a10_display_clk_data sun4i_a10_tcon_ch0_data __initcon
        .offset_rst     = 29,
        .offset_mux     = 24,
        .width_mux      = 2,
+       .flags          = CLK_SET_RATE_PARENT,
 };
 
 static void __init sun4i_a10_tcon_ch0_setup(struct device_node *node)
index 98a4582de56a27c370848b240cb044345e145dc3..b6d29d1bedcaead534320d5a2cad836005821aac 100644 (file)
@@ -79,15 +79,11 @@ static int tcon_ch1_is_enabled(struct clk_hw *hw)
 static u8 tcon_ch1_get_parent(struct clk_hw *hw)
 {
        struct tcon_ch1_clk *tclk = hw_to_tclk(hw);
-       int num_parents = clk_hw_get_num_parents(hw);
        u32 reg;
 
        reg = readl(tclk->reg) >> TCON_CH1_SCLK2_MUX_SHIFT;
        reg &= reg >> TCON_CH1_SCLK2_MUX_MASK;
 
-       if (reg >= num_parents)
-               return -EINVAL;
-
        return reg;
 }
 
index 47352d25c15eb8c747f4582c3518e07af1d0c167..567788664723d5b341000af69cc4e6194ab9025d 100644 (file)
@@ -27,6 +27,20 @@ config CLKBLD_I8253
 config CLKSRC_MMIO
        bool
 
+config BCM2835_TIMER
+       bool "BCM2835 timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       help
+         Enables the support for the BCM2835 timer driver.
+
+config BCM_KONA_TIMER
+       bool "BCM mobile timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       help
+         Enables the support for the BCM Kona mobile timer driver.
+
 config DIGICOLOR_TIMER
        bool "Digicolor timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
@@ -141,6 +155,72 @@ config CLKSRC_DBX500_PRCMU
        help
          Use the always on PRCMU Timer as clocksource
 
+config CLPS711X_TIMER
+       bool "Cirrus logic timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       help
+         Enables support for the Cirrus Logic PS711 timer.
+
+config ATLAS7_TIMER
+       bool "Atlas7 timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       help
+         Enables support for the Atlas7 timer.
+
+config MOXART_TIMER
+       bool "Moxart timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       help
+         Enables support for the Moxart timer.
+
+config MXS_TIMER
+       bool "Mxs timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       select STMP_DEVICE
+       help
+         Enables support for the Mxs timer.
+
+config PRIMA2_TIMER
+       bool "Prima2 timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       help
+         Enables support for the Prima2 timer.
+
+config U300_TIMER
+       bool "U300 timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       depends on ARM
+       select CLKSRC_MMIO
+       help
+         Enables support for the U300 timer.
+
+config NSPIRE_TIMER
+       bool "NSpire timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       help
+         Enables support for the Nspire timer.
+
+config KEYSTONE_TIMER
+       bool "Keystone timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       depends on ARM || ARM64
+       select CLKSRC_MMIO
+       help
+         Enables support for the Keystone timer.
+
+config INTEGRATOR_AP_TIMER
+       bool "Integrator-ap timer driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       help
+         Enables support for the Integrator-ap timer.
+
 config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
        bool "Clocksource PRCMU Timer sched_clock"
        depends on (CLKSRC_DBX500_PRCMU && !CLKSRC_NOMADIK_MTU_SCHED_CLOCK)
@@ -208,14 +288,16 @@ config ARM_ARCH_TIMER
        select CLKSRC_ACPI if ACPI
 
 config ARM_ARCH_TIMER_EVTSTREAM
-       bool "Support for ARM architected timer event stream generation"
+       bool "Enable ARM architected timer event stream generation by default"
        default y if ARM_ARCH_TIMER
        depends on ARM_ARCH_TIMER
        help
-         This option enables support for event stream generation based on
-         the ARM architected timer. It is used for waking up CPUs executing
-         the wfe instruction at a frequency represented as a power-of-2
-         divisor of the clock rate.
+         This option enables support by default for event stream generation
+         based on the ARM architected timer. It is used for waking up CPUs
+         executing the wfe instruction at a frequency represented as a
+         power-of-2 divisor of the clock rate. The behaviour can also be
+         overridden on the command line using the
+         clocksource.arm_arch_timer.evtstream parameter.
          The main use of the event stream is wfe-based timeouts of userspace
          locking implementations. It might also be useful for imposing timeout
          on wfe to safeguard against any programming errors in case an expected
@@ -224,8 +306,9 @@ config ARM_ARCH_TIMER_EVTSTREAM
          hardware anomalies of missing events.
 
 config ARM_GLOBAL_TIMER
-       bool
+       bool "Support for the ARM global timer" if COMPILE_TEST
        select CLKSRC_OF if OF
+       depends on ARM
        help
          This options enables support for the ARM global timer unit
 
@@ -243,7 +326,7 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
         Use ARM global timer clock source as sched_clock
 
 config ARMV7M_SYSTICK
-       bool
+       bool "Support for the ARMv7M system timer" if COMPILE_TEST
        select CLKSRC_OF if OF
        select CLKSRC_MMIO
        help
@@ -254,9 +337,12 @@ config ATMEL_PIT
        def_bool SOC_AT91SAM9 || SOC_SAMA5
 
 config ATMEL_ST
-       bool
+       bool "Atmel ST timer support" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
        select CLKSRC_OF
        select MFD_SYSCON
+       help
+         Support for the Atmel ST timer.
 
 config CLKSRC_METAG_GENERIC
        def_bool y if METAG
@@ -270,7 +356,7 @@ config CLKSRC_EXYNOS_MCT
          Support for Multi Core Timer controller on Exynos SoCs.
 
 config CLKSRC_SAMSUNG_PWM
-       bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
+       bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
        depends on HAS_IOMEM
        help
@@ -293,6 +379,14 @@ config VF_PIT_TIMER
        help
          Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
 
+config OXNAS_RPS_TIMER
+       bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST
+       depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_OF
+       select CLKSRC_MMIO
+       help
+         This enables support for the Oxford Semiconductor OXNAS RPS timers.
+
 config SYS_SUPPORTS_SH_CMT
         bool
 
@@ -361,8 +455,8 @@ config CLKSRC_QCOM
          Qualcomm SoCs.
 
 config CLKSRC_VERSATILE
-       bool "ARM Versatile (Express) reference platforms clock source"
-       depends on PLAT_VERSATILE && GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
+       bool "ARM Versatile (Express) reference platforms clock source" if COMPILE_TEST
+       depends on GENERIC_SCHED_CLOCK && !ARCH_USES_GETTIMEOFFSET
        select CLKSRC_OF
        default y if MFD_VEXPRESS_SYSREG
        help
index 473974f9590a5cb19da5bf2bccf1e126f45bfb45..fd9d6df0bbc0993c3b7862a08f89dc3a9725be9d 100644 (file)
@@ -19,21 +19,21 @@ obj-$(CONFIG_CLKSRC_NOMADIK_MTU)    += nomadik-mtu.o
 obj-$(CONFIG_CLKSRC_DBX500_PRCMU)      += clksrc-dbx500-prcmu.o
 obj-$(CONFIG_ARMADA_370_XP_TIMER)      += time-armada-370-xp.o
 obj-$(CONFIG_ORION_TIMER)      += time-orion.o
-obj-$(CONFIG_ARCH_BCM2835)     += bcm2835_timer.o
-obj-$(CONFIG_ARCH_CLPS711X)    += clps711x-timer.o
-obj-$(CONFIG_ARCH_ATLAS7)      += timer-atlas7.o
-obj-$(CONFIG_ARCH_MOXART)      += moxart_timer.o
-obj-$(CONFIG_ARCH_MXS)         += mxs_timer.o
+obj-$(CONFIG_BCM2835_TIMER)    += bcm2835_timer.o
+obj-$(CONFIG_CLPS711X_TIMER)   += clps711x-timer.o
+obj-$(CONFIG_ATLAS7_TIMER)     += timer-atlas7.o
+obj-$(CONFIG_MOXART_TIMER)     += moxart_timer.o
+obj-$(CONFIG_MXS_TIMER)                += mxs_timer.o
 obj-$(CONFIG_CLKSRC_PXA)       += pxa_timer.o
-obj-$(CONFIG_ARCH_PRIMA2)      += timer-prima2.o
-obj-$(CONFIG_ARCH_U300)                += timer-u300.o
+obj-$(CONFIG_PRIMA2_TIMER)     += timer-prima2.o
+obj-$(CONFIG_U300_TIMER)       += timer-u300.o
 obj-$(CONFIG_SUN4I_TIMER)      += sun4i_timer.o
 obj-$(CONFIG_SUN5I_HSTIMER)    += timer-sun5i.o
 obj-$(CONFIG_MESON6_TIMER)     += meson6_timer.o
 obj-$(CONFIG_TEGRA_TIMER)      += tegra20_timer.o
 obj-$(CONFIG_VT8500_TIMER)     += vt8500_timer.o
-obj-$(CONFIG_ARCH_NSPIRE)      += zevio-timer.o
-obj-$(CONFIG_ARCH_BCM_MOBILE)  += bcm_kona_timer.o
+obj-$(CONFIG_NSPIRE_TIMER)     += zevio-timer.o
+obj-$(CONFIG_BCM_KONA_TIMER)   += bcm_kona_timer.o
 obj-$(CONFIG_CADENCE_TTC_TIMER)        += cadence_ttc_timer.o
 obj-$(CONFIG_CLKSRC_EFM32)     += time-efm32.o
 obj-$(CONFIG_CLKSRC_STM32)     += timer-stm32.o
@@ -48,6 +48,7 @@ obj-$(CONFIG_MTK_TIMER)               += mtk_timer.o
 obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
 obj-$(CONFIG_CLKSRC_TI_32K)    += timer-ti-32k.o
 obj-$(CONFIG_CLKSRC_NPS)       += timer-nps.o
+obj-$(CONFIG_OXNAS_RPS_TIMER)  += timer-oxnas-rps.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)           += arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)         += arm_global_timer.o
@@ -55,8 +56,8 @@ obj-$(CONFIG_ARMV7M_SYSTICK)          += armv7m_systick.o
 obj-$(CONFIG_ARM_TIMER_SP804)          += timer-sp804.o
 obj-$(CONFIG_CLKSRC_METAG_GENERIC)     += metag_generic.o
 obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST)  += dummy_timer.o
-obj-$(CONFIG_ARCH_KEYSTONE)            += timer-keystone.o
-obj-$(CONFIG_ARCH_INTEGRATOR_AP)       += timer-integrator-ap.o
+obj-$(CONFIG_KEYSTONE_TIMER)           += timer-keystone.o
+obj-$(CONFIG_INTEGRATOR_AP_TIMER)      += timer-integrator-ap.o
 obj-$(CONFIG_CLKSRC_VERSATILE)         += versatile.o
 obj-$(CONFIG_CLKSRC_MIPS_GIC)          += mips-gic-timer.o
 obj-$(CONFIG_CLKSRC_TANGO_XTAL)                += tango_xtal.o
index 4814446a0024d9fed6f484c700cd98b47db87a9e..5effd30273192d7b6e9c367061200aa1d7079e79 100644 (file)
@@ -79,6 +79,14 @@ static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
 static bool arch_timer_c3stop;
 static bool arch_timer_mem_use_virtual;
 
+static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM);
+
+static int __init early_evtstrm_cfg(char *buf)
+{
+       return strtobool(buf, &evtstrm_enable);
+}
+early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
+
 /*
  * Architected system timer support.
  */
@@ -372,7 +380,7 @@ static int arch_timer_setup(struct clock_event_device *clk)
                enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
 
        arch_counter_set_user_access();
-       if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
+       if (evtstrm_enable)
                arch_timer_configure_evtstream();
 
        return 0;
@@ -693,25 +701,26 @@ arch_timer_needs_probing(int type, const struct of_device_id *matches)
        return needs_probing;
 }
 
-static void __init arch_timer_common_init(void)
+static int __init arch_timer_common_init(void)
 {
        unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
 
        /* Wait until both nodes are probed if we have two timers */
        if ((arch_timers_present & mask) != mask) {
                if (arch_timer_needs_probing(ARCH_MEM_TIMER, arch_timer_mem_of_match))
-                       return;
+                       return 0;
                if (arch_timer_needs_probing(ARCH_CP15_TIMER, arch_timer_of_match))
-                       return;
+                       return 0;
        }
 
        arch_timer_banner(arch_timers_present);
        arch_counter_register(arch_timers_present);
-       arch_timer_arch_init();
+       return arch_timer_arch_init();
 }
 
-static void __init arch_timer_init(void)
+static int __init arch_timer_init(void)
 {
+       int ret;
        /*
         * If HYP mode is available, we know that the physical timer
         * has been configured to be accessible from PL1. Use it, so
@@ -739,23 +748,30 @@ static void __init arch_timer_init(void)
 
                if (!has_ppi) {
                        pr_warn("arch_timer: No interrupt available, giving up\n");
-                       return;
+                       return -EINVAL;
                }
        }
 
-       arch_timer_register();
-       arch_timer_common_init();
+       ret = arch_timer_register();
+       if (ret)
+               return ret;
+
+       ret = arch_timer_common_init();
+       if (ret)
+               return ret;
 
        arch_timer_kvm_info.virtual_irq = arch_timer_ppi[VIRT_PPI];
+
+       return 0;
 }
 
-static void __init arch_timer_of_init(struct device_node *np)
+static int __init arch_timer_of_init(struct device_node *np)
 {
        int i;
 
        if (arch_timers_present & ARCH_CP15_TIMER) {
                pr_warn("arch_timer: multiple nodes in dt, skipping\n");
-               return;
+               return 0;
        }
 
        arch_timers_present |= ARCH_CP15_TIMER;
@@ -774,23 +790,23 @@ static void __init arch_timer_of_init(struct device_node *np)
            of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
                arch_timer_uses_ppi = PHYS_SECURE_PPI;
 
-       arch_timer_init();
+       return arch_timer_init();
 }
 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init);
 CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_of_init);
 
-static void __init arch_timer_mem_init(struct device_node *np)
+static int __init arch_timer_mem_init(struct device_node *np)
 {
        struct device_node *frame, *best_frame = NULL;
        void __iomem *cntctlbase, *base;
-       unsigned int irq;
+       int irq, ret = -EINVAL;
        u32 cnttidr;
 
        arch_timers_present |= ARCH_MEM_TIMER;
        cntctlbase = of_iomap(np, 0);
        if (!cntctlbase) {
                pr_err("arch_timer: Can't find CNTCTLBase\n");
-               return;
+               return -ENXIO;
        }
 
        cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
@@ -830,6 +846,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
                best_frame = of_node_get(frame);
        }
 
+       ret = -ENXIO;
        base = arch_counter_base = of_iomap(best_frame, 0);
        if (!base) {
                pr_err("arch_timer: Can't map frame's registers\n");
@@ -841,6 +858,7 @@ static void __init arch_timer_mem_init(struct device_node *np)
        else
                irq = irq_of_parse_and_map(best_frame, 0);
 
+       ret = -EINVAL;
        if (!irq) {
                pr_err("arch_timer: Frame missing %s irq",
                       arch_timer_mem_use_virtual ? "virt" : "phys");
@@ -848,11 +866,15 @@ static void __init arch_timer_mem_init(struct device_node *np)
        }
 
        arch_timer_detect_rate(base, np);
-       arch_timer_mem_register(base, irq);
-       arch_timer_common_init();
+       ret = arch_timer_mem_register(base, irq);
+       if (ret)
+               goto out;
+
+       return arch_timer_common_init();
 out:
        iounmap(cntctlbase);
        of_node_put(best_frame);
+       return ret;
 }
 CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
                       arch_timer_mem_init);
index 9df0d1699d22b9f6bbaa8089ca71bc9c17895de0..2a9ceb6e93f92b8dd44451c76972ce7f49d5ec6f 100644 (file)
@@ -238,7 +238,7 @@ static void __init gt_delay_timer_init(void)
        register_current_timer_delay(&gt_delay_timer);
 }
 
-static void __init gt_clocksource_init(void)
+static int __init gt_clocksource_init(void)
 {
        writel(0, gt_base + GT_CONTROL);
        writel(0, gt_base + GT_COUNTER0);
@@ -249,7 +249,7 @@ static void __init gt_clocksource_init(void)
 #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
        sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
 #endif
-       clocksource_register_hz(&gt_clocksource, gt_clk_rate);
+       return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
 }
 
 static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
@@ -270,7 +270,7 @@ static struct notifier_block gt_cpu_nb = {
        .notifier_call = gt_cpu_notify,
 };
 
-static void __init global_timer_of_register(struct device_node *np)
+static int __init global_timer_of_register(struct device_node *np)
 {
        struct clk *gt_clk;
        int err = 0;
@@ -283,19 +283,19 @@ static void __init global_timer_of_register(struct device_node *np)
        if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
            && (read_cpuid_id() & 0xf0000f) < 0x200000) {
                pr_warn("global-timer: non support for this cpu version.\n");
-               return;
+               return -ENOSYS;
        }
 
        gt_ppi = irq_of_parse_and_map(np, 0);
        if (!gt_ppi) {
                pr_warn("global-timer: unable to parse irq\n");
-               return;
+               return -EINVAL;
        }
 
        gt_base = of_iomap(np, 0);
        if (!gt_base) {
                pr_warn("global-timer: invalid base address\n");
-               return;
+               return -ENXIO;
        }
 
        gt_clk = of_clk_get(np, 0);
@@ -332,11 +332,17 @@ static void __init global_timer_of_register(struct device_node *np)
        }
 
        /* Immediately configure the timer on the boot CPU */
-       gt_clocksource_init();
-       gt_clockevents_init(this_cpu_ptr(gt_evt));
+       err = gt_clocksource_init();
+       if (err)
+               goto out_irq;
+
+       err = gt_clockevents_init(this_cpu_ptr(gt_evt));
+       if (err)
+               goto out_irq;
+
        gt_delay_timer_init();
 
-       return;
+       return 0;
 
 out_irq:
        free_percpu_irq(gt_ppi, gt_evt);
@@ -347,6 +353,8 @@ out_clk:
 out_unmap:
        iounmap(gt_base);
        WARN(err, "ARM Global timer register failed (%d)\n", err);
+
+       return err;
 }
 
 /* Only tested on r2p2 and r3p0  */
index addfd2c64f54ef4c75aeb483d0401c1f40a77022..a315491b704751887a9427c298a4a697dddbd6a4 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk.h>
@@ -21,7 +22,7 @@
 
 #define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF
 
-static void __init system_timer_of_register(struct device_node *np)
+static int __init system_timer_of_register(struct device_node *np)
 {
        struct clk *clk = NULL;
        void __iomem *base;
@@ -31,22 +32,26 @@ static void __init system_timer_of_register(struct device_node *np)
        base = of_iomap(np, 0);
        if (!base) {
                pr_warn("system-timer: invalid base address\n");
-               return;
+               return -ENXIO;
        }
 
        ret = of_property_read_u32(np, "clock-frequency", &rate);
        if (ret) {
                clk = of_clk_get(np, 0);
-               if (IS_ERR(clk))
+               if (IS_ERR(clk)) {
+                       ret = PTR_ERR(clk);
                        goto out_unmap;
+               }
 
                ret = clk_prepare_enable(clk);
                if (ret)
                        goto out_clk_put;
 
                rate = clk_get_rate(clk);
-               if (!rate)
+               if (!rate) {
+                       ret = -EINVAL;
                        goto out_clk_disable;
+               }
        }
 
        writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR);
@@ -64,7 +69,7 @@ static void __init system_timer_of_register(struct device_node *np)
 
        pr_info("ARM System timer initialized as clocksource\n");
 
-       return;
+       return 0;
 
 out_clk_disable:
        clk_disable_unprepare(clk);
@@ -73,6 +78,8 @@ out_clk_put:
 out_unmap:
        iounmap(base);
        pr_warn("ARM System timer register failed (%d)\n", ret);
+
+       return ret;
 }
 
 CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick",
index 217438d39eb36ec076eefa1046675687d1b2b9c8..1ba871b7fe118454cc80d69e1998e6cfe838ac49 100644 (file)
@@ -184,7 +184,7 @@ static irqreturn_t asm9260_timer_interrupt(int irq, void *dev_id)
  * Timer initialization
  * ---------------------------------------------------------------------------
  */
-static void __init asm9260_timer_init(struct device_node *np)
+static int __init asm9260_timer_init(struct device_node *np)
 {
        int irq;
        struct clk *clk;
@@ -192,20 +192,26 @@ static void __init asm9260_timer_init(struct device_node *np)
        unsigned long rate;
 
        priv.base = of_io_request_and_map(np, 0, np->name);
-       if (IS_ERR(priv.base))
-               panic("%s: unable to map resource", np->name);
+       if (IS_ERR(priv.base)) {
+               pr_err("%s: unable to map resource", np->name);
+               return PTR_ERR(priv.base);
+       }
 
        clk = of_clk_get(np, 0);
 
        ret = clk_prepare_enable(clk);
-       if (ret)
-               panic("Failed to enable clk!\n");
+       if (ret) {
+               pr_err("Failed to enable clk!\n");
+               return ret;
+       }
 
        irq = irq_of_parse_and_map(np, 0);
        ret = request_irq(irq, asm9260_timer_interrupt, IRQF_TIMER,
                        DRIVER_NAME, &event_dev);
-       if (ret)
-               panic("Failed to setup irq!\n");
+       if (ret) {
+               pr_err("Failed to setup irq!\n");
+               return ret;
+       }
 
        /* set all timers for count-up */
        writel_relaxed(BM_DIR_DEFAULT, priv.base + HW_DIR);
@@ -229,6 +235,8 @@ static void __init asm9260_timer_init(struct device_node *np)
        priv.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
        event_dev.cpumask = cpumask_of(0);
        clockevents_config_and_register(&event_dev, rate, 0x2c00, 0xfffffffe);
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(asm9260_timer, "alphascale,asm9260-timer",
                asm9260_timer_init);
index 6f28229289635cb89361dd3c56299945c15cbcfe..e71acf231c89a8cc4be7e5afd0d43a63d73054b5 100644 (file)
@@ -80,19 +80,24 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
        }
 }
 
-static void __init bcm2835_timer_init(struct device_node *node)
+static int __init bcm2835_timer_init(struct device_node *node)
 {
        void __iomem *base;
        u32 freq;
-       int irq;
+       int irq, ret;
        struct bcm2835_timer *timer;
 
        base = of_iomap(node, 0);
-       if (!base)
-               panic("Can't remap registers");
+       if (!base) {
+               pr_err("Can't remap registers");
+               return -ENXIO;
+       }
 
-       if (of_property_read_u32(node, "clock-frequency", &freq))
-               panic("Can't read clock-frequency");
+       ret = of_property_read_u32(node, "clock-frequency", &freq);
+       if (ret) {
+               pr_err("Can't read clock-frequency");
+               return ret;
+       }
 
        system_clock = base + REG_COUNTER_LO;
        sched_clock_register(bcm2835_sched_read, 32, freq);
@@ -101,12 +106,16 @@ static void __init bcm2835_timer_init(struct device_node *node)
                freq, 300, 32, clocksource_mmio_readl_up);
 
        irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
-       if (irq <= 0)
-               panic("Can't parse IRQ");
+       if (irq <= 0) {
+               pr_err("Can't parse IRQ");
+               return -EINVAL;
+       }
 
        timer = kzalloc(sizeof(*timer), GFP_KERNEL);
-       if (!timer)
-               panic("Can't allocate timer struct\n");
+       if (!timer) {
+               pr_err("Can't allocate timer struct\n");
+               return -ENOMEM;
+       }
 
        timer->control = base + REG_CONTROL;
        timer->compare = base + REG_COMPARE(DEFAULT_TIMER);
@@ -121,12 +130,17 @@ static void __init bcm2835_timer_init(struct device_node *node)
        timer->act.dev_id = timer;
        timer->act.handler = bcm2835_time_interrupt;
 
-       if (setup_irq(irq, &timer->act))
-               panic("Can't set up timer IRQ\n");
+       ret = setup_irq(irq, &timer->act);
+       if (ret) {
+               pr_err("Can't set up timer IRQ\n");
+               return ret;
+       }
 
        clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff);
 
        pr_info("bcm2835: system timer (irq = %d)\n", irq);
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer",
                        bcm2835_timer_init);
index e717e87df9bc4b6bf69d823c76c45f61189c1ce0..7e3fd375a6278f17d26fc7b584bb98fa70459a02 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/clk.h>
 
 #include <linux/io.h>
-#include <asm/mach/time.h>
 
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -163,16 +162,11 @@ static struct irqaction kona_timer_irq = {
        .handler = kona_timer_interrupt,
 };
 
-static void __init kona_timer_init(struct device_node *node)
+static int __init kona_timer_init(struct device_node *node)
 {
        u32 freq;
        struct clk *external_clk;
 
-       if (!of_device_is_available(node)) {
-               pr_info("Kona Timer v1 marked as disabled in device tree\n");
-               return;
-       }
-
        external_clk = of_clk_get_by_name(node, NULL);
 
        if (!IS_ERR(external_clk)) {
@@ -182,7 +176,7 @@ static void __init kona_timer_init(struct device_node *node)
                arch_timer_rate = freq;
        } else {
                pr_err("Kona Timer v1 unable to determine clock-frequency");
-               return;
+               return -EINVAL;
        }
 
        /* Setup IRQ numbers */
@@ -196,6 +190,8 @@ static void __init kona_timer_init(struct device_node *node)
        kona_timer_clockevents_init();
        setup_irq(timers.tmr_irq, &kona_timer_irq);
        kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
+
+       return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(brcm_kona, "brcm,kona-timer", kona_timer_init);
index 9be6018bd2b89cc8737e2a3c3e19a277c7eb67ed..fbfbdec13b081837e3866ee06f8a9c231cce990c 100644 (file)
@@ -322,22 +322,22 @@ static int ttc_rate_change_clocksource_cb(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
-static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
+static int __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
                                         u32 timer_width)
 {
        struct ttc_timer_clocksource *ttccs;
        int err;
 
        ttccs = kzalloc(sizeof(*ttccs), GFP_KERNEL);
-       if (WARN_ON(!ttccs))
-               return;
+       if (!ttccs)
+               return -ENOMEM;
 
        ttccs->ttc.clk = clk;
 
        err = clk_prepare_enable(ttccs->ttc.clk);
-       if (WARN_ON(err)) {
+       if (err) {
                kfree(ttccs);
-               return;
+               return err;
        }
 
        ttccs->ttc.freq = clk_get_rate(ttccs->ttc.clk);
@@ -345,8 +345,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
        ttccs->ttc.clk_rate_change_nb.notifier_call =
                ttc_rate_change_clocksource_cb;
        ttccs->ttc.clk_rate_change_nb.next = NULL;
-       if (clk_notifier_register(ttccs->ttc.clk,
-                               &ttccs->ttc.clk_rate_change_nb))
+
+       err = clk_notifier_register(ttccs->ttc.clk,
+                                   &ttccs->ttc.clk_rate_change_nb);
+       if (err)
                pr_warn("Unable to register clock notifier.\n");
 
        ttccs->ttc.base_addr = base;
@@ -368,14 +370,16 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base,
                     ttccs->ttc.base_addr + TTC_CNT_CNTRL_OFFSET);
 
        err = clocksource_register_hz(&ttccs->cs, ttccs->ttc.freq / PRESCALE);
-       if (WARN_ON(err)) {
+       if (err) {
                kfree(ttccs);
-               return;
+               return err;
        }
 
        ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
        sched_clock_register(ttc_sched_clock_read, timer_width,
                             ttccs->ttc.freq / PRESCALE);
+
+       return 0;
 }
 
 static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
@@ -401,30 +405,35 @@ static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
        }
 }
 
-static void __init ttc_setup_clockevent(struct clk *clk,
-                                               void __iomem *base, u32 irq)
+static int __init ttc_setup_clockevent(struct clk *clk,
+                                      void __iomem *base, u32 irq)
 {
        struct ttc_timer_clockevent *ttcce;
        int err;
 
        ttcce = kzalloc(sizeof(*ttcce), GFP_KERNEL);
-       if (WARN_ON(!ttcce))
-               return;
+       if (!ttcce)
+               return -ENOMEM;
 
        ttcce->ttc.clk = clk;
 
        err = clk_prepare_enable(ttcce->ttc.clk);
-       if (WARN_ON(err)) {
+       if (err) {
                kfree(ttcce);
-               return;
+               return err;
        }
 
        ttcce->ttc.clk_rate_change_nb.notifier_call =
                ttc_rate_change_clockevent_cb;
        ttcce->ttc.clk_rate_change_nb.next = NULL;
-       if (clk_notifier_register(ttcce->ttc.clk,
-                               &ttcce->ttc.clk_rate_change_nb))
+
+       err = clk_notifier_register(ttcce->ttc.clk,
+                                   &ttcce->ttc.clk_rate_change_nb);
+       if (err) {
                pr_warn("Unable to register clock notifier.\n");
+               return err;
+       }
+
        ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
 
        ttcce->ttc.base_addr = base;
@@ -451,13 +460,15 @@ static void __init ttc_setup_clockevent(struct clk *clk,
 
        err = request_irq(irq, ttc_clock_event_interrupt,
                          IRQF_TIMER, ttcce->ce.name, ttcce);
-       if (WARN_ON(err)) {
+       if (err) {
                kfree(ttcce);
-               return;
+               return err;
        }
 
        clockevents_config_and_register(&ttcce->ce,
                        ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
+
+       return 0;
 }
 
 /**
@@ -466,17 +477,17 @@ static void __init ttc_setup_clockevent(struct clk *clk,
  * Initializes the timer hardware and register the clock source and clock event
  * timers with Linux kernal timer framework
  */
-static void __init ttc_timer_init(struct device_node *timer)
+static int __init ttc_timer_init(struct device_node *timer)
 {
        unsigned int irq;
        void __iomem *timer_baseaddr;
        struct clk *clk_cs, *clk_ce;
        static int initialized;
-       int clksel;
+       int clksel, ret;
        u32 timer_width = 16;
 
        if (initialized)
-               return;
+               return 0;
 
        initialized = 1;
 
@@ -488,13 +499,13 @@ static void __init ttc_timer_init(struct device_node *timer)
        timer_baseaddr = of_iomap(timer, 0);
        if (!timer_baseaddr) {
                pr_err("ERROR: invalid timer base address\n");
-               BUG();
+               return -ENXIO;
        }
 
        irq = irq_of_parse_and_map(timer, 1);
        if (irq <= 0) {
                pr_err("ERROR: invalid interrupt number\n");
-               BUG();
+               return -EINVAL;
        }
 
        of_property_read_u32(timer, "timer-width", &timer_width);
@@ -504,7 +515,7 @@ static void __init ttc_timer_init(struct device_node *timer)
        clk_cs = of_clk_get(timer, clksel);
        if (IS_ERR(clk_cs)) {
                pr_err("ERROR: timer input clock not found\n");
-               BUG();
+               return PTR_ERR(clk_cs);
        }
 
        clksel = readl_relaxed(timer_baseaddr + 4 + TTC_CLK_CNTRL_OFFSET);
@@ -512,13 +523,20 @@ static void __init ttc_timer_init(struct device_node *timer)
        clk_ce = of_clk_get(timer, clksel);
        if (IS_ERR(clk_ce)) {
                pr_err("ERROR: timer input clock not found\n");
-               BUG();
+               return PTR_ERR(clk_ce);
        }
 
-       ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
-       ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+       ret = ttc_setup_clocksource(clk_cs, timer_baseaddr, timer_width);
+       if (ret)
+               return ret;
+
+       ret = ttc_setup_clockevent(clk_ce, timer_baseaddr + 4, irq);
+       if (ret)
+               return ret;
 
        pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
+
+       return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(ttc, "cdns,ttc", ttc_timer_init);
index dfad6eb9966289831330e5cd13979a74d5eec9ea..77a365f573d7f19a385f06ceffb16011a9159d98 100644 (file)
@@ -64,7 +64,7 @@ static u64 notrace dbx500_prcmu_sched_clock_read(void)
 
 #endif
 
-static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
+static int __init clksrc_dbx500_prcmu_init(struct device_node *node)
 {
        clksrc_dbx500_timer_base = of_iomap(node, 0);
 
@@ -84,7 +84,7 @@ static void __init clksrc_dbx500_prcmu_init(struct device_node *node)
 #ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
        sched_clock_register(dbx500_prcmu_sched_clock_read, 32, RATE_32K);
 #endif
-       clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
+       return clocksource_register_hz(&clocksource_dbx500_prcmu, RATE_32K);
 }
 CLOCKSOURCE_OF_DECLARE(dbx500_prcmu, "stericsson,db8500-prcmu-timer-4",
                       clksrc_dbx500_prcmu_init);
index 7cb6c923a836f141dd5424c34039fe11a8cbe81e..bc62be97f0a875a1b4306c5fae14dedb22d303b1 100644 (file)
@@ -28,15 +28,23 @@ void __init clocksource_probe(void)
 {
        struct device_node *np;
        const struct of_device_id *match;
-       of_init_fn_1 init_func;
+       of_init_fn_1_ret init_func_ret;
        unsigned clocksources = 0;
+       int ret;
 
        for_each_matching_node_and_match(np, __clksrc_of_table, &match) {
                if (!of_device_is_available(np))
                        continue;
 
-               init_func = match->data;
-               init_func(np);
+               init_func_ret = match->data;
+
+               ret = init_func_ret(np);
+               if (ret) {
+                       pr_err("Failed to initialize '%s': %d\n",
+                              of_node_full_name(np), ret);
+                       continue;
+               }
+
                clocksources++;
        }
 
index 65ec4674416d0e70567164059a71c01b0fa7813e..03cc49217bb49af3021fa9295516c1e24826f094 100644 (file)
@@ -92,7 +92,7 @@ static int __init st_clksrc_setup_clk(struct device_node *np)
        return 0;
 }
 
-static void __init st_clksrc_of_register(struct device_node *np)
+static int __init st_clksrc_of_register(struct device_node *np)
 {
        int ret;
        uint32_t mode;
@@ -100,32 +100,36 @@ static void __init st_clksrc_of_register(struct device_node *np)
        ret = of_property_read_u32(np, "st,lpc-mode", &mode);
        if (ret) {
                pr_err("clksrc-st-lpc: An LPC mode must be provided\n");
-               return;
+               return ret;
        }
 
        /* LPC can either run as a Clocksource or in RTC or WDT mode */
        if (mode != ST_LPC_MODE_CLKSRC)
-               return;
+               return 0;
 
        ddata.base = of_iomap(np, 0);
        if (!ddata.base) {
                pr_err("clksrc-st-lpc: Unable to map iomem\n");
-               return;
+               return -ENXIO;
        }
 
-       if (st_clksrc_setup_clk(np)) {
+       ret = st_clksrc_setup_clk(np);
+       if (ret) {
                iounmap(ddata.base);
-               return;
+               return ret;
        }
 
-       if (st_clksrc_init()) {
+       ret = st_clksrc_init();
+       if (ret) {
                clk_disable_unprepare(ddata.clk);
                clk_put(ddata.clk);
                iounmap(ddata.base);
-               return;
+               return ret;
        }
 
        pr_info("clksrc-st-lpc: clocksource initialised - running @ %luHz\n",
                clk_get_rate(ddata.clk));
+
+       return ret;
 }
 CLOCKSOURCE_OF_DECLARE(ddata, "st,stih407-lpc", st_clksrc_of_register);
index cdd86e3525bbb04107bb964c357f07d8588a934d..84aed78261e46e6cf6427298fdc78a16790bf8d1 100644 (file)
@@ -104,7 +104,7 @@ void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
 }
 
 #ifdef CONFIG_CLKSRC_OF
-static void __init clps711x_timer_init(struct device_node *np)
+static int __init clps711x_timer_init(struct device_node *np)
 {
        unsigned int irq = irq_of_parse_and_map(np, 0);
        struct clk *clock = of_clk_get(np, 0);
@@ -112,13 +112,11 @@ static void __init clps711x_timer_init(struct device_node *np)
 
        switch (of_alias_get_id(np, "timer")) {
        case CLPS711X_CLKSRC_CLOCKSOURCE:
-               BUG_ON(_clps711x_clksrc_init(clock, base));
-               break;
+               return _clps711x_clksrc_init(clock, base);
        case CLPS711X_CLKSRC_CLOCKEVENT:
-               BUG_ON(_clps711x_clkevt_init(clock, base, irq));
-               break;
+               return _clps711x_clkevt_init(clock, base, irq);
        default:
-               break;
+               return -EINVAL;
        }
 }
 CLOCKSOURCE_OF_DECLARE(clps711x, "cirrus,clps711x-timer", clps711x_timer_init);
index 860843cef5725f6a4d64fc515d9ccac7b88b2457..aee6c0d39a7c1e663827560eb763f23a0d6abea4 100644 (file)
@@ -143,7 +143,7 @@ static struct delay_timer dw_apb_delay_timer = {
 #endif
 
 static int num_called;
-static void __init dw_apb_timer_init(struct device_node *timer)
+static int __init dw_apb_timer_init(struct device_node *timer)
 {
        switch (num_called) {
        case 0:
@@ -164,6 +164,8 @@ static void __init dw_apb_timer_init(struct device_node *timer)
        }
 
        num_called++;
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
 CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
index be09bc0b5e269cb3748644456bf5bfb86d329379..0d18dd4b3bd2910d402202979ebbc5305371619f 100644 (file)
@@ -232,7 +232,7 @@ static cycles_t exynos4_read_current_timer(void)
        return exynos4_read_count_32();
 }
 
-static void __init exynos4_clocksource_init(void)
+static int __init exynos4_clocksource_init(void)
 {
        exynos4_mct_frc_start();
 
@@ -244,6 +244,8 @@ static void __init exynos4_clocksource_init(void)
                panic("%s: can't register clocksource\n", mct_frc.name);
 
        sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
+
+       return 0;
 }
 
 static void exynos4_mct_comp0_stop(void)
@@ -335,12 +337,14 @@ static struct irqaction mct_comp_event_irq = {
        .dev_id         = &mct_comp_device,
 };
 
-static void exynos4_clockevent_init(void)
+static int exynos4_clockevent_init(void)
 {
        mct_comp_device.cpumask = cpumask_of(0);
        clockevents_config_and_register(&mct_comp_device, clk_rate,
                                        0xf, 0xffffffff);
        setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
+
+       return 0;
 }
 
 static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
@@ -516,7 +520,7 @@ static struct notifier_block exynos4_mct_cpu_nb = {
        .notifier_call = exynos4_mct_cpu_notify,
 };
 
-static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
 {
        int err, cpu;
        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
@@ -572,15 +576,17 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem
 
        /* Immediately configure the timer on the boot CPU */
        exynos4_local_timer_setup(mevt);
-       return;
+       return 0;
 
 out_irq:
        free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
+       return err;
 }
 
-static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
+static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
 {
        u32 nr_irqs, i;
+       int ret;
 
        mct_int_type = int_type;
 
@@ -600,18 +606,24 @@ static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
        for (i = MCT_L0_IRQ; i < nr_irqs; i++)
                mct_irqs[i] = irq_of_parse_and_map(np, i);
 
-       exynos4_timer_resources(np, of_iomap(np, 0));
-       exynos4_clocksource_init();
-       exynos4_clockevent_init();
+       ret = exynos4_timer_resources(np, of_iomap(np, 0));
+       if (ret)
+               return ret;
+
+       ret = exynos4_clocksource_init();
+       if (ret)
+               return ret;
+
+       return exynos4_clockevent_init();
 }
 
 
-static void __init mct_init_spi(struct device_node *np)
+static int __init mct_init_spi(struct device_node *np)
 {
        return mct_init_dt(np, MCT_INT_SPI);
 }
 
-static void __init mct_init_ppi(struct device_node *np)
+static int __init mct_init_ppi(struct device_node *np)
 {
        return mct_init_dt(np, MCT_INT_PPI);
 }
index 517e1c7624d442f7de842fa011516415d86c66b3..738515b89073ccab553a35d6081ed78787984d12 100644 (file)
@@ -316,15 +316,16 @@ static int __init ftm_calc_closest_round_cyc(unsigned long freq)
        return 0;
 }
 
-static void __init ftm_timer_init(struct device_node *np)
+static int __init ftm_timer_init(struct device_node *np)
 {
        unsigned long freq;
-       int irq;
+       int ret, irq;
 
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
-               return;
+               return -ENOMEM;
 
+       ret = -ENXIO;
        priv->clkevt_base = of_iomap(np, 0);
        if (!priv->clkevt_base) {
                pr_err("ftm: unable to map event timer registers\n");
@@ -337,6 +338,7 @@ static void __init ftm_timer_init(struct device_node *np)
                goto err;
        }
 
+       ret = -EINVAL;
        irq = irq_of_parse_and_map(np, 0);
        if (irq <= 0) {
                pr_err("ftm: unable to get IRQ from DT, %d\n", irq);
@@ -349,18 +351,22 @@ static void __init ftm_timer_init(struct device_node *np)
        if (!freq)
                goto err;
 
-       if (ftm_calc_closest_round_cyc(freq))
+       ret = ftm_calc_closest_round_cyc(freq);
+       if (ret)
                goto err;
 
-       if (ftm_clocksource_init(freq))
+       ret = ftm_clocksource_init(freq);
+       if (ret)
                goto err;
 
-       if (ftm_clockevent_init(freq, irq))
+       ret = ftm_clockevent_init(freq, irq);
+       if (ret)
                goto err;
 
-       return;
+       return 0;
 
 err:
        kfree(priv);
+       return ret;
 }
 CLOCKSOURCE_OF_DECLARE(flextimer, "fsl,ftm-timer", ftm_timer_init);
index 75c44079b3454513b2f6a788872b4c579c7f27b8..07d9d5be9054a6e46aeef0a933e0344b1dcf84dc 100644 (file)
@@ -126,7 +126,7 @@ static struct timer16_priv timer16_priv = {
 #define REG_CH   0
 #define REG_COMM 1
 
-static void __init h8300_16timer_init(struct device_node *node)
+static int __init h8300_16timer_init(struct device_node *node)
 {
        void __iomem *base[2];
        int ret, irq;
@@ -136,9 +136,10 @@ static void __init h8300_16timer_init(struct device_node *node)
        clk = of_clk_get(node, 0);
        if (IS_ERR(clk)) {
                pr_err("failed to get clock for clocksource\n");
-               return;
+               return PTR_ERR(clk);
        }
 
+       ret = -ENXIO;
        base[REG_CH] = of_iomap(node, 0);
        if (!base[REG_CH]) {
                pr_err("failed to map registers for clocksource\n");
@@ -151,6 +152,7 @@ static void __init h8300_16timer_init(struct device_node *node)
                goto unmap_ch;
        }
 
+       ret = -EINVAL;
        irq = irq_of_parse_and_map(node, 0);
        if (!irq) {
                pr_err("failed to get irq for clockevent\n");
@@ -174,7 +176,7 @@ static void __init h8300_16timer_init(struct device_node *node)
 
        clocksource_register_hz(&timer16_priv.cs,
                                clk_get_rate(clk) / 8);
-       return;
+       return 0;
 
 unmap_comm:
        iounmap(base[REG_COMM]);
@@ -182,6 +184,8 @@ unmap_ch:
        iounmap(base[REG_CH]);
 free_clk:
        clk_put(clk);
+       return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer", h8300_16timer_init);
+CLOCKSOURCE_OF_DECLARE(h8300_16bit, "renesas,16bit-timer",
+                          h8300_16timer_init);
index c151941e1956d3a8338edeeeefd4c09e0d78066c..546bb180f5a4481d6d13f7b68e37537940c3e795 100644 (file)
@@ -164,24 +164,26 @@ static struct timer8_priv timer8_priv = {
        },
 };
 
-static void __init h8300_8timer_init(struct device_node *node)
+static int __init h8300_8timer_init(struct device_node *node)
 {
        void __iomem *base;
-       int irq;
+       int irq, ret;
        struct clk *clk;
 
        clk = of_clk_get(node, 0);
        if (IS_ERR(clk)) {
                pr_err("failed to get clock for clockevent\n");
-               return;
+               return PTR_ERR(clk);
        }
 
+       ret = -ENXIO;
        base = of_iomap(node, 0);
        if (!base) {
                pr_err("failed to map registers for clockevent\n");
                goto free_clk;
        }
 
+       ret = -EINVAL;
        irq = irq_of_parse_and_map(node, 0);
        if (!irq) {
                pr_err("failed to get irq for clockevent\n");
@@ -205,11 +207,12 @@ static void __init h8300_8timer_init(struct device_node *node)
        clockevents_config_and_register(&timer8_priv.ced,
                                        timer8_priv.rate, 1, 0x0000ffff);
 
-       return;
+       return 0;
 unmap_reg:
        iounmap(base);
 free_clk:
        clk_put(clk);
+       return ret;
 }
 
 CLOCKSOURCE_OF_DECLARE(h8300_8bit, "renesas,8bit-timer", h8300_8timer_init);
index d4c1a287c2622e459bbe544399e872cddcc44439..7bdf1991c847448525c3dc9d6fddf4e758361375 100644 (file)
@@ -119,15 +119,16 @@ static struct tpu_priv tpu_priv = {
 #define CH_L 0
 #define CH_H 1
 
-static void __init h8300_tpu_init(struct device_node *node)
+static int __init h8300_tpu_init(struct device_node *node)
 {
        void __iomem *base[2];
        struct clk *clk;
+       int ret = -ENXIO;
 
        clk = of_clk_get(node, 0);
        if (IS_ERR(clk)) {
                pr_err("failed to get clock for clocksource\n");
-               return;
+               return PTR_ERR(clk);
        }
 
        base[CH_L] = of_iomap(node, CH_L);
@@ -144,14 +145,13 @@ static void __init h8300_tpu_init(struct device_node *node)
        tpu_priv.mapbase1 = base[CH_L];
        tpu_priv.mapbase2 = base[CH_H];
 
-       clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
-
-       return;
+       return clocksource_register_hz(&tpu_priv.cs, clk_get_rate(clk) / 64);
 
 unmap_L:
        iounmap(base[CH_H]);
 free_clk:
        clk_put(clk);
+       return ret;
 }
 
 CLOCKSOURCE_OF_DECLARE(h8300_tpu, "renesas,tpu", h8300_tpu_init);
index 1fa22c4d2d49b24fe2be0230b9325bf4f07cf251..52af591a9fc704e7c09650be2ec73509ed9c699f 100644 (file)
@@ -126,18 +126,22 @@ static struct irqaction meson6_timer_irq = {
        .dev_id         = &meson6_clockevent,
 };
 
-static void __init meson6_timer_init(struct device_node *node)
+static int __init meson6_timer_init(struct device_node *node)
 {
        u32 val;
        int ret, irq;
 
        timer_base = of_io_request_and_map(node, 0, "meson6-timer");
-       if (IS_ERR(timer_base))
-               panic("Can't map registers");
+       if (IS_ERR(timer_base)) {
+               pr_err("Can't map registers");
+               return -ENXIO;
+       }
 
        irq = irq_of_parse_and_map(node, 0);
-       if (irq <= 0)
-               panic("Can't parse IRQ");
+       if (irq <= 0) {
+               pr_err("Can't parse IRQ");
+               return -EINVAL;
+       }
 
        /* Set 1us for timer E */
        val = readl(timer_base + TIMER_ISA_MUX);
@@ -158,14 +162,17 @@ static void __init meson6_timer_init(struct device_node *node)
        meson6_clkevt_time_stop(CED_ID);
 
        ret = setup_irq(irq, &meson6_timer_irq);
-       if (ret)
+       if (ret) {
                pr_warn("failed to setup irq %d\n", irq);
+               return ret;
+       }
 
        meson6_clockevent.cpumask = cpu_possible_mask;
        meson6_clockevent.irq = irq;
 
        clockevents_config_and_register(&meson6_clockevent, USEC_PER_SEC,
                                        1, 0xfffe);
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(meson6, "amlogic,meson6-timer",
                       meson6_timer_init);
index 89d3e4d7900c51f384f3b90200c1fecb2bab013f..1572c7a778abbb97d729f5ec25bcb0ab1c393d45 100644 (file)
@@ -146,7 +146,7 @@ static struct clocksource gic_clocksource = {
        .archdata       = { .vdso_clock_mode = VDSO_CLOCK_GIC },
 };
 
-static void __init __gic_clocksource_init(void)
+static int __init __gic_clocksource_init(void)
 {
        int ret;
 
@@ -159,6 +159,8 @@ static void __init __gic_clocksource_init(void)
        ret = clocksource_register_hz(&gic_clocksource, gic_frequency);
        if (ret < 0)
                pr_warn("GIC: Unable to register clocksource\n");
+
+       return ret;
 }
 
 void __init gic_clocksource_init(unsigned int frequency)
@@ -179,31 +181,35 @@ static void __init gic_clocksource_of_init(struct device_node *node)
        struct clk *clk;
        int ret;
 
-       if (WARN_ON(!gic_present || !node->parent ||
-                   !of_device_is_compatible(node->parent, "mti,gic")))
-               return;
+       if (!gic_present || !node->parent ||
+           !of_device_is_compatible(node->parent, "mti,gic")) {
+               pr_warn("No DT definition for the mips gic driver");
+               return -ENXIO;
+       }
 
        clk = of_clk_get(node, 0);
        if (!IS_ERR(clk)) {
                if (clk_prepare_enable(clk) < 0) {
                        pr_err("GIC failed to enable clock\n");
                        clk_put(clk);
-                       return;
+                       return -EINVAL;
                }
 
                gic_frequency = clk_get_rate(clk);
        } else if (of_property_read_u32(node, "clock-frequency",
                                        &gic_frequency)) {
                pr_err("GIC frequency not specified.\n");
-               return;
+               return -EINVAL;
        }
        gic_timer_irq = irq_of_parse_and_map(node, 0);
        if (!gic_timer_irq) {
                pr_err("GIC timer IRQ not specified.\n");
-               return;
+               return -EINVAL;
        }
 
-       __gic_clocksource_init();
+       ret = __gic_clocksource_init();
+       if (ret)
+               return ret;
 
        ret = gic_clockevent_init();
        if (!ret && !IS_ERR(clk)) {
@@ -213,6 +219,8 @@ static void __init gic_clocksource_of_init(struct device_node *node)
 
        /* And finally start the counter */
        gic_start_count();
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
                       gic_clocksource_of_init);
index 19857af651c1cfceafa9f2a0f3a465333228fb3d..841454417acd9ae3a08840bb932f2bce10635360 100644 (file)
@@ -119,34 +119,45 @@ static struct irqaction moxart_timer_irq = {
        .dev_id         = &moxart_clockevent,
 };
 
-static void __init moxart_timer_init(struct device_node *node)
+static int __init moxart_timer_init(struct device_node *node)
 {
        int ret, irq;
        unsigned long pclk;
        struct clk *clk;
 
        base = of_iomap(node, 0);
-       if (!base)
-               panic("%s: of_iomap failed\n", node->full_name);
+       if (!base) {
+               pr_err("%s: of_iomap failed\n", node->full_name);
+               return -ENXIO;
+       }
 
        irq = irq_of_parse_and_map(node, 0);
-       if (irq <= 0)
-               panic("%s: irq_of_parse_and_map failed\n", node->full_name);
+       if (irq <= 0) {
+               pr_err("%s: irq_of_parse_and_map failed\n", node->full_name);
+               return -EINVAL;
+       }
 
        ret = setup_irq(irq, &moxart_timer_irq);
-       if (ret)
-               panic("%s: setup_irq failed\n", node->full_name);
+       if (ret) {
+               pr_err("%s: setup_irq failed\n", node->full_name);
+               return ret;
+       }
 
        clk = of_clk_get(node, 0);
-       if (IS_ERR(clk))
-               panic("%s: of_clk_get failed\n", node->full_name);
+       if (IS_ERR(clk))  {
+               pr_err("%s: of_clk_get failed\n", node->full_name);
+               return PTR_ERR(clk);
+       }
 
        pclk = clk_get_rate(clk);
 
-       if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
-                                 "moxart_timer", pclk, 200, 32,
-                                 clocksource_mmio_readl_down))
-               panic("%s: clocksource_mmio_init failed\n", node->full_name);
+       ret = clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
+                                   "moxart_timer", pclk, 200, 32,
+                                   clocksource_mmio_readl_down);
+       if (ret) {
+               pr_err("%s: clocksource_mmio_init failed\n", node->full_name);
+               return ret;
+       }
 
        clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
 
@@ -164,5 +175,7 @@ static void __init moxart_timer_init(struct device_node *node)
         */
        clockevents_config_and_register(&moxart_clockevent, pclk,
                                        0x4, 0xfffffffe);
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
index 3d33a5e23dee762540815ac4263bdf1463127995..3e4431ed9aa92a4354d1f5468ce230c964e07770 100644 (file)
@@ -250,7 +250,7 @@ out:
        return ret;
 }
 
-static void __init mps2_timer_init(struct device_node *np)
+static int __init mps2_timer_init(struct device_node *np)
 {
        static int has_clocksource, has_clockevent;
        int ret;
@@ -259,7 +259,7 @@ static void __init mps2_timer_init(struct device_node *np)
                ret = mps2_clocksource_init(np);
                if (!ret) {
                        has_clocksource = 1;
-                       return;
+                       return 0;
                }
        }
 
@@ -267,9 +267,11 @@ static void __init mps2_timer_init(struct device_node *np)
                ret = mps2_clockevent_init(np);
                if (!ret) {
                        has_clockevent = 1;
-                       return;
+                       return 0;
                }
        }
+
+       return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(mps2_timer, "arm,mps2-timer", mps2_timer_init);
index 7e583f8ea5f4ea0d99cd1ca7441d0c16d74975ac..90659493c59c4a5a284a16429d306a4d1eae5caa 100644 (file)
@@ -181,7 +181,7 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
                        evt->gpt_base + GPT_IRQ_EN_REG);
 }
 
-static void __init mtk_timer_init(struct device_node *node)
+static int __init mtk_timer_init(struct device_node *node)
 {
        struct mtk_clock_event_device *evt;
        struct resource res;
@@ -190,7 +190,7 @@ static void __init mtk_timer_init(struct device_node *node)
 
        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt)
-               return;
+               return -ENOMEM;
 
        evt->dev.name = "mtk_tick";
        evt->dev.rating = 300;
@@ -248,7 +248,7 @@ static void __init mtk_timer_init(struct device_node *node)
 
        mtk_timer_enable_irq(evt, GPT_CLK_EVT);
 
-       return;
+       return 0;
 
 err_clk_disable:
        clk_disable_unprepare(clk);
@@ -262,5 +262,7 @@ err_mem:
        release_mem_region(res.start, resource_size(&res));
 err_kzalloc:
        kfree(evt);
+
+       return -EINVAL;
 }
 CLOCKSOURCE_OF_DECLARE(mtk_mt6577, "mediatek,mt6577-timer", mtk_timer_init);
index f5ce2961c0d62372328d7b2286675692248287dd..0ba0a913b41d113e7459b6dc21e07e00bb0d0648 100644 (file)
@@ -31,8 +31,6 @@
 #include <linux/stmp_device.h>
 #include <linux/sched_clock.h>
 
-#include <asm/mach/time.h>
-
 /*
  * There are 2 versions of the timrot on Freescale MXS-based SoCs.
  * The v1 on MX23 only gets 16 bits counter, while v2 on MX28
@@ -226,10 +224,10 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
        return 0;
 }
 
-static void __init mxs_timer_init(struct device_node *np)
+static int __init mxs_timer_init(struct device_node *np)
 {
        struct clk *timer_clk;
-       int irq;
+       int irq, ret;
 
        mxs_timrot_base = of_iomap(np, 0);
        WARN_ON(!mxs_timrot_base);
@@ -237,10 +235,12 @@ static void __init mxs_timer_init(struct device_node *np)
        timer_clk = of_clk_get(np, 0);
        if (IS_ERR(timer_clk)) {
                pr_err("%s: failed to get clk\n", __func__);
-               return;
+               return PTR_ERR(timer_clk);
        }
 
-       clk_prepare_enable(timer_clk);
+       ret = clk_prepare_enable(timer_clk);
+       if (ret)
+               return ret;
 
        /*
         * Initialize timers to a known state
@@ -278,11 +278,19 @@ static void __init mxs_timer_init(struct device_node *np)
                        mxs_timrot_base + HW_TIMROT_FIXED_COUNTn(1));
 
        /* init and register the timer to the framework */
-       mxs_clocksource_init(timer_clk);
-       mxs_clockevent_init(timer_clk);
+       ret = mxs_clocksource_init(timer_clk);
+       if (ret)
+               return ret;
+
+       ret = mxs_clockevent_init(timer_clk);
+       if (ret)
+               return ret;
 
        /* Make irqs happen */
        irq = irq_of_parse_and_map(np, 0);
-       setup_irq(irq, &mxs_timer_irq);
+       if (irq <= 0)
+               return -EINVAL;
+
+       return setup_irq(irq, &mxs_timer_irq);
 }
 CLOCKSOURCE_OF_DECLARE(mxs, "fsl,timrot", mxs_timer_init);
index bc8dd443c72711f776eafd90562a35949ccd0856..3c124d1ca600b1a212cf75c9efb7fcb7afd1c055 100644 (file)
@@ -193,10 +193,11 @@ static struct irqaction nmdk_timer_irq = {
        .dev_id         = &nmdk_clkevt,
 };
 
-static void __init nmdk_timer_init(void __iomem *base, int irq,
+static int __init nmdk_timer_init(void __iomem *base, int irq,
                                   struct clk *pclk, struct clk *clk)
 {
        unsigned long rate;
+       int ret;
 
        mtu_base = base;
 
@@ -226,10 +227,12 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
        /* Timer 0 is the free running clocksource */
        nmdk_clksrc_reset();
 
-       if (clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
-                       rate, 200, 32, clocksource_mmio_readl_down))
-               pr_err("timer: failed to initialize clock source %s\n",
-                      "mtu_0");
+       ret = clocksource_mmio_init(mtu_base + MTU_VAL(0), "mtu_0",
+                                   rate, 200, 32, clocksource_mmio_readl_down);
+       if (ret) {
+               pr_err("timer: failed to initialize clock source %s\n", "mtu_0");
+               return ret;
+       }
 
 #ifdef CONFIG_CLKSRC_NOMADIK_MTU_SCHED_CLOCK
        sched_clock_register(nomadik_read_sched_clock, 32, rate);
@@ -244,9 +247,11 @@ static void __init nmdk_timer_init(void __iomem *base, int irq,
        mtu_delay_timer.read_current_timer = &nmdk_timer_read_current_timer;
        mtu_delay_timer.freq = rate;
        register_current_timer_delay(&mtu_delay_timer);
+
+       return 0;
 }
 
-static void __init nmdk_timer_of_init(struct device_node *node)
+static int __init nmdk_timer_of_init(struct device_node *node)
 {
        struct clk *pclk;
        struct clk *clk;
@@ -254,22 +259,30 @@ static void __init nmdk_timer_of_init(struct device_node *node)
        int irq;
 
        base = of_iomap(node, 0);
-       if (!base)
-               panic("Can't remap registers");
+       if (!base) {
+               pr_err("Can't remap registers");
+               return -ENXIO;
+       }
 
        pclk = of_clk_get_by_name(node, "apb_pclk");
-       if (IS_ERR(pclk))
-               panic("could not get apb_pclk");
+       if (IS_ERR(pclk)) {
+               pr_err("could not get apb_pclk");
+               return PTR_ERR(pclk);
+       }
 
        clk = of_clk_get_by_name(node, "timclk");
-       if (IS_ERR(clk))
-               panic("could not get timclk");
+       if (IS_ERR(clk)) {
+               pr_err("could not get timclk");
+               return PTR_ERR(clk);
+       }
 
        irq = irq_of_parse_and_map(node, 0);
-       if (irq <= 0)
-               panic("Can't parse IRQ");
+       if (irq <= 0) {
+               pr_err("Can't parse IRQ");
+               return -EINVAL;
+       }
 
-       nmdk_timer_init(base, irq, pclk, clk);
+       return nmdk_timer_init(base, irq, pclk, clk);
 }
 CLOCKSOURCE_OF_DECLARE(nomadik_mtu, "st,nomadik-mtu",
                       nmdk_timer_of_init);
index 45b6a49997137beb7f67877a682a19c2066fffe6..937e10b84d5850311d878988780fb7aefee069e6 100644 (file)
@@ -150,8 +150,10 @@ static struct irqaction pxa_ost0_irq = {
        .dev_id         = &ckevt_pxa_osmr0,
 };
 
-static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
+static int __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
 {
+       int ret;
+
        timer_writel(0, OIER);
        timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
 
@@ -159,39 +161,57 @@ static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate)
 
        ckevt_pxa_osmr0.cpumask = cpumask_of(0);
 
-       setup_irq(irq, &pxa_ost0_irq);
+       ret = setup_irq(irq, &pxa_ost0_irq);
+       if (ret) {
+               pr_err("Failed to setup irq");
+               return ret;
+       }
+
+       ret = clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
+                                   32, clocksource_mmio_readl_up);
+       if (ret) {
+               pr_err("Failed to init clocksource");
+               return ret;
+       }
 
-       clocksource_mmio_init(timer_base + OSCR, "oscr0", clock_tick_rate, 200,
-                             32, clocksource_mmio_readl_up);
        clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
                                        MIN_OSCR_DELTA * 2, 0x7fffffff);
+
+       return 0;
 }
 
-static void __init pxa_timer_dt_init(struct device_node *np)
+static int __init pxa_timer_dt_init(struct device_node *np)
 {
        struct clk *clk;
-       int irq;
+       int irq, ret;
 
        /* timer registers are shared with watchdog timer */
        timer_base = of_iomap(np, 0);
-       if (!timer_base)
-               panic("%s: unable to map resource\n", np->name);
+       if (!timer_base) {
+               pr_err("%s: unable to map resource\n", np->name);
+               return -ENXIO;
+       }
 
        clk = of_clk_get(np, 0);
        if (IS_ERR(clk)) {
                pr_crit("%s: unable to get clk\n", np->name);
-               return;
+               return PTR_ERR(clk);
+       }
+
+       ret = clk_prepare_enable(clk);
+       if (ret) {
+               pr_crit("Failed to prepare clock");
+               return ret;
        }
-       clk_prepare_enable(clk);
 
        /* we are only interested in OS-timer0 irq */
        irq = irq_of_parse_and_map(np, 0);
        if (irq <= 0) {
                pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
-               return;
+               return -EINVAL;
        }
 
-       pxa_timer_common_init(irq, clk_get_rate(clk));
+       return pxa_timer_common_init(irq, clk_get_rate(clk));
 }
 CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init);
 
index f8e09f9236512ac8328fd992569cca171e4e16e5..662576339049e4e4b7152dfe8f5810c414059de7 100644 (file)
@@ -178,7 +178,7 @@ static struct delay_timer msm_delay_timer = {
        .read_current_timer = msm_read_current_timer,
 };
 
-static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
+static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
                                  bool percpu)
 {
        struct clocksource *cs = &msm_clocksource;
@@ -218,12 +218,14 @@ err:
        sched_clock_register(msm_sched_clock_read, sched_bits, dgt_hz);
        msm_delay_timer.freq = dgt_hz;
        register_current_timer_delay(&msm_delay_timer);
+
+       return res;
 }
 
-static void __init msm_dt_timer_init(struct device_node *np)
+static int __init msm_dt_timer_init(struct device_node *np)
 {
        u32 freq;
-       int irq;
+       int irq, ret;
        struct resource res;
        u32 percpu_offset;
        void __iomem *base;
@@ -232,34 +234,35 @@ static void __init msm_dt_timer_init(struct device_node *np)
        base = of_iomap(np, 0);
        if (!base) {
                pr_err("Failed to map event base\n");
-               return;
+               return -ENXIO;
        }
 
        /* We use GPT0 for the clockevent */
        irq = irq_of_parse_and_map(np, 1);
        if (irq <= 0) {
                pr_err("Can't get irq\n");
-               return;
+               return -EINVAL;
        }
 
        /* We use CPU0's DGT for the clocksource */
        if (of_property_read_u32(np, "cpu-offset", &percpu_offset))
                percpu_offset = 0;
 
-       if (of_address_to_resource(np, 0, &res)) {
+       ret = of_address_to_resource(np, 0, &res);
+       if (ret) {
                pr_err("Failed to parse DGT resource\n");
-               return;
+               return ret;
        }
 
        cpu0_base = ioremap(res.start + percpu_offset, resource_size(&res));
        if (!cpu0_base) {
                pr_err("Failed to map source base\n");
-               return;
+               return -EINVAL;
        }
 
        if (of_property_read_u32(np, "clock-frequency", &freq)) {
                pr_err("Unknown frequency\n");
-               return;
+               return -EINVAL;
        }
 
        event_base = base + 0x4;
@@ -268,7 +271,7 @@ static void __init msm_dt_timer_init(struct device_node *np)
        freq /= 4;
        writel_relaxed(DGT_CLK_CTL_DIV_4, source_base + DGT_CLK_CTL);
 
-       msm_timer_init(freq, 32, irq, !!percpu_offset);
+       return msm_timer_init(freq, 32, irq, !!percpu_offset);
 }
 CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
 CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
index b991b288c8032740f4eb0a0a4a30d056fcbb6694..23e267acba25c6c9374d24e6266ed9cc7ceb7cff 100644 (file)
@@ -19,7 +19,8 @@
 
 #define TIMER_LOAD_COUNT0      0x00
 #define TIMER_LOAD_COUNT1      0x04
-#define TIMER_CONTROL_REG      0x10
+#define TIMER_CONTROL_REG3288  0x10
+#define TIMER_CONTROL_REG3399  0x1c
 #define TIMER_INT_STATUS       0x18
 
 #define TIMER_DISABLE          0x0
@@ -31,6 +32,7 @@
 struct bc_timer {
        struct clock_event_device ce;
        void __iomem *base;
+       void __iomem *ctrl;
        u32 freq;
 };
 
@@ -46,15 +48,20 @@ static inline void __iomem *rk_base(struct clock_event_device *ce)
        return rk_timer(ce)->base;
 }
 
+static inline void __iomem *rk_ctrl(struct clock_event_device *ce)
+{
+       return rk_timer(ce)->ctrl;
+}
+
 static inline void rk_timer_disable(struct clock_event_device *ce)
 {
-       writel_relaxed(TIMER_DISABLE, rk_base(ce) + TIMER_CONTROL_REG);
+       writel_relaxed(TIMER_DISABLE, rk_ctrl(ce));
 }
 
 static inline void rk_timer_enable(struct clock_event_device *ce, u32 flags)
 {
        writel_relaxed(TIMER_ENABLE | TIMER_INT_UNMASK | flags,
-                      rk_base(ce) + TIMER_CONTROL_REG);
+                      rk_ctrl(ce));
 }
 
 static void rk_timer_update_counter(unsigned long cycles,
@@ -106,37 +113,42 @@ static irqreturn_t rk_timer_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static void __init rk_timer_init(struct device_node *np)
+static int __init rk_timer_init(struct device_node *np, u32 ctrl_reg)
 {
        struct clock_event_device *ce = &bc_timer.ce;
        struct clk *timer_clk;
        struct clk *pclk;
-       int ret, irq;
+       int ret = -EINVAL, irq;
 
        bc_timer.base = of_iomap(np, 0);
        if (!bc_timer.base) {
                pr_err("Failed to get base address for '%s'\n", TIMER_NAME);
-               return;
+               return -ENXIO;
        }
+       bc_timer.ctrl = bc_timer.base + ctrl_reg;
 
        pclk = of_clk_get_by_name(np, "pclk");
        if (IS_ERR(pclk)) {
+               ret = PTR_ERR(pclk);
                pr_err("Failed to get pclk for '%s'\n", TIMER_NAME);
                goto out_unmap;
        }
 
-       if (clk_prepare_enable(pclk)) {
+       ret = clk_prepare_enable(pclk);
+       if (ret) {
                pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME);
                goto out_unmap;
        }
 
        timer_clk = of_clk_get_by_name(np, "timer");
        if (IS_ERR(timer_clk)) {
+               ret = PTR_ERR(timer_clk);
                pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME);
                goto out_timer_clk;
        }
 
-       if (clk_prepare_enable(timer_clk)) {
+       ret = clk_prepare_enable(timer_clk);
+       if (ret) {
                pr_err("Failed to enable timer clock\n");
                goto out_timer_clk;
        }
@@ -145,17 +157,19 @@ static void __init rk_timer_init(struct device_node *np)
 
        irq = irq_of_parse_and_map(np, 0);
        if (!irq) {
+               ret = -EINVAL;
                pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
                goto out_irq;
        }
 
        ce->name = TIMER_NAME;
-       ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+       ce->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+                      CLOCK_EVT_FEAT_DYNIRQ;
        ce->set_next_event = rk_timer_set_next_event;
        ce->set_state_shutdown = rk_timer_shutdown;
        ce->set_state_periodic = rk_timer_set_periodic;
        ce->irq = irq;
-       ce->cpumask = cpumask_of(0);
+       ce->cpumask = cpu_possible_mask;
        ce->rating = 250;
 
        rk_timer_interrupt_clear(ce);
@@ -169,7 +183,7 @@ static void __init rk_timer_init(struct device_node *np)
 
        clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX);
 
-       return;
+       return 0;
 
 out_irq:
        clk_disable_unprepare(timer_clk);
@@ -177,6 +191,21 @@ out_timer_clk:
        clk_disable_unprepare(pclk);
 out_unmap:
        iounmap(bc_timer.base);
+
+       return ret;
+}
+
+static int __init rk3288_timer_init(struct device_node *np)
+{
+       return rk_timer_init(np, TIMER_CONTROL_REG3288);
+}
+
+static int __init rk3399_timer_init(struct device_node *np)
+{
+       return rk_timer_init(np, TIMER_CONTROL_REG3399);
 }
 
-CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init);
+CLOCKSOURCE_OF_DECLARE(rk3288_timer, "rockchip,rk3288-timer",
+                      rk3288_timer_init);
+CLOCKSOURCE_OF_DECLARE(rk3399_timer, "rockchip,rk3399-timer",
+                      rk3399_timer_init);
index 9502bc4c3f6d9a17ec7616bf743c5bfc26c3f638..54565bd0093bfc02673d6f27841315c589b8ad70 100644 (file)
@@ -130,9 +130,9 @@ static void samsung_time_stop(unsigned int channel)
 
        spin_lock_irqsave(&samsung_pwm_lock, flags);
 
-       tcon = __raw_readl(pwm.base + REG_TCON);
+       tcon = readl_relaxed(pwm.base + REG_TCON);
        tcon &= ~TCON_START(channel);
-       __raw_writel(tcon, pwm.base + REG_TCON);
+       writel_relaxed(tcon, pwm.base + REG_TCON);
 
        spin_unlock_irqrestore(&samsung_pwm_lock, flags);
 }
@@ -148,14 +148,14 @@ static void samsung_time_setup(unsigned int channel, unsigned long tcnt)
 
        spin_lock_irqsave(&samsung_pwm_lock, flags);
 
-       tcon = __raw_readl(pwm.base + REG_TCON);
+       tcon = readl_relaxed(pwm.base + REG_TCON);
 
        tcon &= ~(TCON_START(tcon_chan) | TCON_AUTORELOAD(tcon_chan));
        tcon |= TCON_MANUALUPDATE(tcon_chan);
 
-       __raw_writel(tcnt, pwm.base + REG_TCNTB(channel));
-       __raw_writel(tcnt, pwm.base + REG_TCMPB(channel));
-       __raw_writel(tcon, pwm.base + REG_TCON);
+       writel_relaxed(tcnt, pwm.base + REG_TCNTB(channel));
+       writel_relaxed(tcnt, pwm.base + REG_TCMPB(channel));
+       writel_relaxed(tcon, pwm.base + REG_TCON);
 
        spin_unlock_irqrestore(&samsung_pwm_lock, flags);
 }
@@ -170,7 +170,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
 
        spin_lock_irqsave(&samsung_pwm_lock, flags);
 
-       tcon = __raw_readl(pwm.base + REG_TCON);
+       tcon = readl_relaxed(pwm.base + REG_TCON);
 
        tcon &= ~TCON_MANUALUPDATE(channel);
        tcon |= TCON_START(channel);
@@ -180,7 +180,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
        else
                tcon &= ~TCON_AUTORELOAD(channel);
 
-       __raw_writel(tcon, pwm.base + REG_TCON);
+       writel_relaxed(tcon, pwm.base + REG_TCON);
 
        spin_unlock_irqrestore(&samsung_pwm_lock, flags);
 }
@@ -333,11 +333,10 @@ static u64 notrace samsung_read_sched_clock(void)
        return samsung_clocksource_read(NULL);
 }
 
-static void __init samsung_clocksource_init(void)
+static int __init samsung_clocksource_init(void)
 {
        unsigned long pclk;
        unsigned long clock_rate;
-       int ret;
 
        pclk = clk_get_rate(pwm.timerclk);
 
@@ -358,9 +357,7 @@ static void __init samsung_clocksource_init(void)
                                                pwm.variant.bits, clock_rate);
 
        samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
-       ret = clocksource_register_hz(&samsung_clocksource, clock_rate);
-       if (ret)
-               panic("samsung_clocksource_timer: can't register clocksource\n");
+       return clocksource_register_hz(&samsung_clocksource, clock_rate);
 }
 
 static void __init samsung_timer_resources(void)
@@ -380,26 +377,31 @@ static void __init samsung_timer_resources(void)
 /*
  * PWM master driver
  */
-static void __init _samsung_pwm_clocksource_init(void)
+static int __init _samsung_pwm_clocksource_init(void)
 {
        u8 mask;
        int channel;
 
        mask = ~pwm.variant.output_mask & ((1 << SAMSUNG_PWM_NUM) - 1);
        channel = fls(mask) - 1;
-       if (channel < 0)
-               panic("failed to find PWM channel for clocksource");
+       if (channel < 0) {
+               pr_crit("failed to find PWM channel for clocksource");
+               return -EINVAL;
+       }
        pwm.source_id = channel;
 
        mask &= ~(1 << channel);
        channel = fls(mask) - 1;
-       if (channel < 0)
-               panic("failed to find PWM channel for clock event");
+       if (channel < 0) {
+               pr_crit("failed to find PWM channel for clock event");
+               return -EINVAL;
+       }
        pwm.event_id = channel;
 
        samsung_timer_resources();
        samsung_clockevent_init();
-       samsung_clocksource_init();
+
+       return samsung_clocksource_init();
 }
 
 void __init samsung_pwm_clocksource_init(void __iomem *base,
@@ -417,8 +419,8 @@ void __init samsung_pwm_clocksource_init(void __iomem *base,
 }
 
 #ifdef CONFIG_CLKSRC_OF
-static void __init samsung_pwm_alloc(struct device_node *np,
-                                    const struct samsung_pwm_variant *variant)
+static int __init samsung_pwm_alloc(struct device_node *np,
+                                   const struct samsung_pwm_variant *variant)
 {
        struct property *prop;
        const __be32 *cur;
@@ -441,14 +443,16 @@ static void __init samsung_pwm_alloc(struct device_node *np,
        pwm.base = of_iomap(np, 0);
        if (!pwm.base) {
                pr_err("%s: failed to map PWM registers\n", __func__);
-               return;
+               return -ENXIO;
        }
 
        pwm.timerclk = of_clk_get_by_name(np, "timers");
-       if (IS_ERR(pwm.timerclk))
-               panic("failed to get timers clock for timer");
+       if (IS_ERR(pwm.timerclk)) {
+               pr_crit("failed to get timers clock for timer");
+               return PTR_ERR(pwm.timerclk);
+       }
 
-       _samsung_pwm_clocksource_init();
+       return _samsung_pwm_clocksource_init();
 }
 
 static const struct samsung_pwm_variant s3c24xx_variant = {
@@ -458,9 +462,9 @@ static const struct samsung_pwm_variant s3c24xx_variant = {
        .tclk_mask      = (1 << 4),
 };
 
-static void __init s3c2410_pwm_clocksource_init(struct device_node *np)
+static int __init s3c2410_pwm_clocksource_init(struct device_node *np)
 {
-       samsung_pwm_alloc(np, &s3c24xx_variant);
+       return samsung_pwm_alloc(np, &s3c24xx_variant);
 }
 CLOCKSOURCE_OF_DECLARE(s3c2410_pwm, "samsung,s3c2410-pwm", s3c2410_pwm_clocksource_init);
 
@@ -471,9 +475,9 @@ static const struct samsung_pwm_variant s3c64xx_variant = {
        .tclk_mask      = (1 << 7) | (1 << 6) | (1 << 5),
 };
 
-static void __init s3c64xx_pwm_clocksource_init(struct device_node *np)
+static int __init s3c64xx_pwm_clocksource_init(struct device_node *np)
 {
-       samsung_pwm_alloc(np, &s3c64xx_variant);
+       return samsung_pwm_alloc(np, &s3c64xx_variant);
 }
 CLOCKSOURCE_OF_DECLARE(s3c6400_pwm, "samsung,s3c6400-pwm", s3c64xx_pwm_clocksource_init);
 
@@ -484,9 +488,9 @@ static const struct samsung_pwm_variant s5p64x0_variant = {
        .tclk_mask      = 0,
 };
 
-static void __init s5p64x0_pwm_clocksource_init(struct device_node *np)
+static int __init s5p64x0_pwm_clocksource_init(struct device_node *np)
 {
-       samsung_pwm_alloc(np, &s5p64x0_variant);
+       return samsung_pwm_alloc(np, &s5p64x0_variant);
 }
 CLOCKSOURCE_OF_DECLARE(s5p6440_pwm, "samsung,s5p6440-pwm", s5p64x0_pwm_clocksource_init);
 
@@ -497,9 +501,9 @@ static const struct samsung_pwm_variant s5p_variant = {
        .tclk_mask      = (1 << 5),
 };
 
-static void __init s5p_pwm_clocksource_init(struct device_node *np)
+static int __init s5p_pwm_clocksource_init(struct device_node *np)
 {
-       samsung_pwm_alloc(np, &s5p_variant);
+       return samsung_pwm_alloc(np, &s5p_variant);
 }
 CLOCKSOURCE_OF_DECLARE(s5pc100_pwm, "samsung,s5pc100-pwm", s5p_pwm_clocksource_init);
 #endif
index 6f3719d73390fb64d9b7cc0ad83636f1efcbcf85..97669ee4df2a6625f9e69b83d6d479898aee8515 100644 (file)
@@ -146,7 +146,7 @@ static u64 notrace sun4i_timer_sched_read(void)
        return ~readl(timer_base + TIMER_CNTVAL_REG(1));
 }
 
-static void __init sun4i_timer_init(struct device_node *node)
+static int __init sun4i_timer_init(struct device_node *node)
 {
        unsigned long rate = 0;
        struct clk *clk;
@@ -154,17 +154,28 @@ static void __init sun4i_timer_init(struct device_node *node)
        u32 val;
 
        timer_base = of_iomap(node, 0);
-       if (!timer_base)
-               panic("Can't map registers");
+       if (!timer_base) {
+               pr_crit("Can't map registers");
+               return -ENXIO;
+       }
 
        irq = irq_of_parse_and_map(node, 0);
-       if (irq <= 0)
-               panic("Can't parse IRQ");
+       if (irq <= 0) {
+               pr_crit("Can't parse IRQ");
+               return -EINVAL;
+       }
 
        clk = of_clk_get(node, 0);
-       if (IS_ERR(clk))
-               panic("Can't get timer clock");
-       clk_prepare_enable(clk);
+       if (IS_ERR(clk)) {
+               pr_crit("Can't get timer clock");
+               return PTR_ERR(clk);
+       }
+
+       ret = clk_prepare_enable(clk);
+       if (ret) {
+               pr_err("Failed to prepare clock");
+               return ret;
+       }
 
        rate = clk_get_rate(clk);
 
@@ -182,8 +193,12 @@ static void __init sun4i_timer_init(struct device_node *node)
            of_machine_is_compatible("allwinner,sun5i-a10s"))
                sched_clock_register(sun4i_timer_sched_read, 32, rate);
 
-       clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
-                             rate, 350, 32, clocksource_mmio_readl_down);
+       ret = clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
+                                   rate, 350, 32, clocksource_mmio_readl_down);
+       if (ret) {
+               pr_err("Failed to register clocksource");
+               return ret;
+       }
 
        ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
 
@@ -200,12 +215,16 @@ static void __init sun4i_timer_init(struct device_node *node)
                                        TIMER_SYNC_TICKS, 0xffffffff);
 
        ret = setup_irq(irq, &sun4i_timer_irq);
-       if (ret)
-               pr_warn("failed to setup irq %d\n", irq);
+       if (ret) {
+               pr_err("failed to setup irq %d\n", irq);
+               return ret;
+       }
 
        /* Enable timer0 interrupt */
        val = readl(timer_base + TIMER_IRQ_EN_REG);
        writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
+
+       return ret;
 }
 CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
                       sun4i_timer_init);
index c407c47a32326a568f11b2f586c8e29aa3588572..12fcef8cf2d36758cadf8449c84f33dafde28acc 100644 (file)
@@ -19,7 +19,7 @@ static u64 notrace read_sched_clock(void)
        return read_xtal_counter();
 }
 
-static void __init tango_clocksource_init(struct device_node *np)
+static int __init tango_clocksource_init(struct device_node *np)
 {
        struct clk *clk;
        int xtal_freq, ret;
@@ -27,13 +27,13 @@ static void __init tango_clocksource_init(struct device_node *np)
        xtal_in_cnt = of_iomap(np, 0);
        if (xtal_in_cnt == NULL) {
                pr_err("%s: invalid address\n", np->full_name);
-               return;
+               return -ENXIO;
        }
 
        clk = of_clk_get(np, 0);
        if (IS_ERR(clk)) {
                pr_err("%s: invalid clock\n", np->full_name);
-               return;
+               return PTR_ERR(clk);
        }
 
        xtal_freq = clk_get_rate(clk);
@@ -44,11 +44,13 @@ static void __init tango_clocksource_init(struct device_node *np)
                                    32, clocksource_mmio_readl_up);
        if (ret) {
                pr_err("%s: registration failed\n", np->full_name);
-               return;
+               return ret;
        }
 
        sched_clock_register(read_sched_clock, 32, xtal_freq);
        register_current_timer_delay(&delay_timer);
+
+       return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
index 7b94ad2ab27861192f08601cd5cb787d94111806..f960891aa04e730283c1c170b20ab63bc837e18d 100644 (file)
@@ -165,7 +165,7 @@ static struct irqaction tegra_timer_irq = {
        .dev_id         = &tegra_clockevent,
 };
 
-static void __init tegra20_init_timer(struct device_node *np)
+static int __init tegra20_init_timer(struct device_node *np)
 {
        struct clk *clk;
        unsigned long rate;
@@ -174,13 +174,13 @@ static void __init tegra20_init_timer(struct device_node *np)
        timer_reg_base = of_iomap(np, 0);
        if (!timer_reg_base) {
                pr_err("Can't map timer registers\n");
-               BUG();
+               return -ENXIO;
        }
 
        tegra_timer_irq.irq = irq_of_parse_and_map(np, 2);
        if (tegra_timer_irq.irq <= 0) {
                pr_err("Failed to map timer IRQ\n");
-               BUG();
+               return -EINVAL;
        }
 
        clk = of_clk_get(np, 0);
@@ -211,10 +211,12 @@ static void __init tegra20_init_timer(struct device_node *np)
 
        sched_clock_register(tegra_read_sched_clock, 32, 1000000);
 
-       if (clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
-               "timer_us", 1000000, 300, 32, clocksource_mmio_readl_up)) {
+       ret = clocksource_mmio_init(timer_reg_base + TIMERUS_CNTR_1US,
+                                   "timer_us", 1000000, 300, 32,
+                                   clocksource_mmio_readl_up);
+       if (ret) {
                pr_err("Failed to register clocksource\n");
-               BUG();
+               return ret;
        }
 
        tegra_delay_timer.read_current_timer =
@@ -225,24 +227,26 @@ static void __init tegra20_init_timer(struct device_node *np)
        ret = setup_irq(tegra_timer_irq.irq, &tegra_timer_irq);
        if (ret) {
                pr_err("Failed to register timer IRQ: %d\n", ret);
-               BUG();
+               return ret;
        }
 
        tegra_clockevent.cpumask = cpu_all_mask;
        tegra_clockevent.irq = tegra_timer_irq.irq;
        clockevents_config_and_register(&tegra_clockevent, 1000000,
                                        0x1, 0x1fffffff);
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(tegra20_timer, "nvidia,tegra20-timer", tegra20_init_timer);
 
-static void __init tegra20_init_rtc(struct device_node *np)
+static int __init tegra20_init_rtc(struct device_node *np)
 {
        struct clk *clk;
 
        rtc_base = of_iomap(np, 0);
        if (!rtc_base) {
                pr_err("Can't map RTC registers");
-               BUG();
+               return -ENXIO;
        }
 
        /*
@@ -255,6 +259,6 @@ static void __init tegra20_init_rtc(struct device_node *np)
        else
                clk_prepare_enable(clk);
 
-       register_persistent_clock(NULL, tegra_read_persistent_clock64);
+       return register_persistent_clock(NULL, tegra_read_persistent_clock64);
 }
 CLOCKSOURCE_OF_DECLARE(tegra20_rtc, "nvidia,tegra20-rtc", tegra20_init_rtc);
index d93ec3c4f139f2bbcf1ada62efaeed9e6062fb66..20ec066481fe2bcddd05518c851b6c43bdc5f2b8 100644 (file)
@@ -246,7 +246,7 @@ static void armada_370_xp_timer_resume(void)
        writel(timer0_local_ctrl_reg, local_base + TIMER_CTRL_OFF);
 }
 
-struct syscore_ops armada_370_xp_timer_syscore_ops = {
+static struct syscore_ops armada_370_xp_timer_syscore_ops = {
        .suspend        = armada_370_xp_timer_suspend,
        .resume         = armada_370_xp_timer_resume,
 };
@@ -260,14 +260,22 @@ static struct delay_timer armada_370_delay_timer = {
        .read_current_timer = armada_370_delay_timer_read,
 };
 
-static void __init armada_370_xp_timer_common_init(struct device_node *np)
+static int __init armada_370_xp_timer_common_init(struct device_node *np)
 {
        u32 clr = 0, set = 0;
        int res;
 
        timer_base = of_iomap(np, 0);
-       WARN_ON(!timer_base);
+       if (!timer_base) {
+               pr_err("Failed to iomap");
+               return -ENXIO;
+       }
+
        local_base = of_iomap(np, 1);
+       if (!local_base) {
+               pr_err("Failed to iomap local_base\n");
+               iounmap(timer_base);
+               return -ENXIO;
+       }
 
        if (timer25Mhz) {
                set = TIMER0_25MHZ;
@@ -306,14 +314,19 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
         */
        sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
 
-       clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
-                             "armada_370_xp_clocksource",
-                             timer_clk, 300, 32, clocksource_mmio_readl_down);
+       res = clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
+                                   "armada_370_xp_clocksource",
+                                   timer_clk, 300, 32, clocksource_mmio_readl_down);
+       if (res) {
+               pr_err("Failed to initialize clocksource mmio");
+               return res;
+       }
 
        register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
 
        armada_370_xp_evt = alloc_percpu(struct clock_event_device);
-
+       if (!armada_370_xp_evt)
+               return -ENOMEM;
 
        /*
         * Setup clockevent timer (interrupt-driven).
@@ -323,33 +336,54 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
                                "armada_370_xp_per_cpu_tick",
                                armada_370_xp_evt);
        /* Immediately configure the timer on the boot CPU */
-       if (!res)
-               armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+       if (res) {
+               pr_err("Failed to request percpu irq");
+               return res;
+       }
+
+       res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+       if (res) {
+               pr_err("Failed to setup timer");
+               return res;
+       }
 
        register_syscore_ops(&armada_370_xp_timer_syscore_ops);
+
+       return 0;
 }
 
-static void __init armada_xp_timer_init(struct device_node *np)
+static int __init armada_xp_timer_init(struct device_node *np)
 {
-       struct clk *clk = of_clk_get_by_name(np, "fixed");
+       struct clk *clk;
+       int ret;
+
+       /* The 25Mhz fixed clock is mandatory, and must always be available */
+       clk = of_clk_get_by_name(np, "fixed");
+       if (IS_ERR(clk)) {
+               pr_err("Failed to get clock");
+               return PTR_ERR(clk);
+       }
+
+       ret = clk_prepare_enable(clk);
+       if (ret)
+               return ret;
 
-       /* The 25Mhz fixed clock is mandatory, and must always be available */
-       BUG_ON(IS_ERR(clk));
-       clk_prepare_enable(clk);
        timer_clk = clk_get_rate(clk);
 
-       armada_370_xp_timer_common_init(np);
+       return armada_370_xp_timer_common_init(np);
 }
 CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
                       armada_xp_timer_init);
 
-static void __init armada_375_timer_init(struct device_node *np)
+static int __init armada_375_timer_init(struct device_node *np)
 {
        struct clk *clk;
+       int ret;
 
        clk = of_clk_get_by_name(np, "fixed");
        if (!IS_ERR(clk)) {
-               clk_prepare_enable(clk);
+               ret = clk_prepare_enable(clk);
+               if (ret)
+                       return ret;
                timer_clk = clk_get_rate(clk);
        } else {
 
@@ -360,27 +394,43 @@ static void __init armada_375_timer_init(struct device_node *np)
                clk = of_clk_get(np, 0);
 
                /* Must have at least a clock */
-               BUG_ON(IS_ERR(clk));
-               clk_prepare_enable(clk);
+               if (IS_ERR(clk)) {
+                       pr_err("Failed to get clock");
+                       return PTR_ERR(clk);
+               }
+
+               ret = clk_prepare_enable(clk);
+               if (ret)
+                       return ret;
+
                timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
                timer25Mhz = false;
        }
 
-       armada_370_xp_timer_common_init(np);
+       return armada_370_xp_timer_common_init(np);
 }
 CLOCKSOURCE_OF_DECLARE(armada_375, "marvell,armada-375-timer",
                       armada_375_timer_init);
 
-static void __init armada_370_timer_init(struct device_node *np)
+static int __init armada_370_timer_init(struct device_node *np)
 {
-       struct clk *clk = of_clk_get(np, 0);
+       struct clk *clk;
+       int ret;
+
+       clk = of_clk_get(np, 0);
+       if (IS_ERR(clk)) {
+               pr_err("Failed to get clock");
+               return PTR_ERR(clk);
+       }
+
+       ret = clk_prepare_enable(clk);
+       if (ret)
+               return ret;
 
-       BUG_ON(IS_ERR(clk));
-       clk_prepare_enable(clk);
        timer_clk = clk_get_rate(clk) / TIMER_DIVIDER;
        timer25Mhz = false;
 
-       armada_370_xp_timer_common_init(np);
+       return armada_370_xp_timer_common_init(np);
 }
 CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
                       armada_370_timer_init);
index b06e4c2be4068f540700e515e0e657522517f3c3..5ac344b383e1c4bee8f3c36913ee09dbea22201c 100644 (file)
@@ -233,10 +233,15 @@ static int __init efm32_clockevent_init(struct device_node *np)
                                        DIV_ROUND_CLOSEST(rate, 1024),
                                        0xf, 0xffff);
 
-       setup_irq(irq, &efm32_clock_event_irq);
+       ret = setup_irq(irq, &efm32_clock_event_irq);
+       if (ret) {
+               pr_err("Failed setup irq");
+               goto err_setup_irq;
+       }
 
        return 0;
 
+err_setup_irq:
 err_get_irq:
 
        iounmap(base);
@@ -255,16 +260,16 @@ err_clk_get:
  * This function asserts that we have exactly one clocksource and one
  * clock_event_device in the end.
  */
-static void __init efm32_timer_init(struct device_node *np)
+static int __init efm32_timer_init(struct device_node *np)
 {
        static int has_clocksource, has_clockevent;
-       int ret;
+       int ret = 0;
 
        if (!has_clocksource) {
                ret = efm32_clocksource_init(np);
                if (!ret) {
                        has_clocksource = 1;
-                       return;
+                       return 0;
                }
        }
 
@@ -272,9 +277,11 @@ static void __init efm32_timer_init(struct device_node *np)
                ret = efm32_clockevent_init(np);
                if (!ret) {
                        has_clockevent = 1;
-                       return;
+                       return 0;
                }
        }
+
+       return ret;
 }
 CLOCKSOURCE_OF_DECLARE(efm32compat, "efm32,timer", efm32_timer_init);
 CLOCKSOURCE_OF_DECLARE(efm32, "energymicro,efm32-timer", efm32_timer_init);
index daae61e8c820efd3ca9cef3ef074d2d780e54a8e..9649cfdb92137e24a571b4e5d0871486d8805796 100644 (file)
@@ -288,16 +288,16 @@ err_clk_enable:
  * This function asserts that we have exactly one clocksource and one
  * clock_event_device in the end.
  */
-static void __init lpc32xx_timer_init(struct device_node *np)
+static int __init lpc32xx_timer_init(struct device_node *np)
 {
        static int has_clocksource, has_clockevent;
-       int ret;
+       int ret = 0;
 
        if (!has_clocksource) {
                ret = lpc32xx_clocksource_init(np);
                if (!ret) {
                        has_clocksource = 1;
-                       return;
+                       return 0;
                }
        }
 
@@ -305,8 +305,10 @@ static void __init lpc32xx_timer_init(struct device_node *np)
                ret = lpc32xx_clockevent_init(np);
                if (!ret) {
                        has_clockevent = 1;
-                       return;
+                       return 0;
                }
        }
+
+       return ret;
 }
 CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
index 0ece7427b497844848b89a09f4b4d9f0a7067624..a28f496e97cfb7df1da8cee1cc0adc07e40164bf 100644 (file)
@@ -104,25 +104,36 @@ static struct irqaction orion_clkevt_irq = {
        .handler        = orion_clkevt_irq_handler,
 };
 
-static void __init orion_timer_init(struct device_node *np)
+static int __init orion_timer_init(struct device_node *np)
 {
        struct clk *clk;
-       int irq;
+       int irq, ret;
 
        /* timer registers are shared with watchdog timer */
        timer_base = of_iomap(np, 0);
-       if (!timer_base)
-               panic("%s: unable to map resource\n", np->name);
+       if (!timer_base) {
+               pr_err("%s: unable to map resource\n", np->name);
+               return -ENXIO;
+       }
 
        clk = of_clk_get(np, 0);
-       if (IS_ERR(clk))
-               panic("%s: unable to get clk\n", np->name);
-       clk_prepare_enable(clk);
+       if (IS_ERR(clk)) {
+               pr_err("%s: unable to get clk\n", np->name);
+               return PTR_ERR(clk);
+       }
+
+       ret = clk_prepare_enable(clk);
+       if (ret) {
+               pr_err("Failed to prepare clock");
+               return ret;
+       }
 
        /* we are only interested in timer1 irq */
        irq = irq_of_parse_and_map(np, 1);
-       if (irq <= 0)
-               panic("%s: unable to parse timer1 irq\n", np->name);
+       if (irq <= 0) {
+               pr_err("%s: unable to parse timer1 irq\n", np->name);
+               return -EINVAL;
+       }
 
        /* setup timer0 as free-running clocksource */
        writel(~0, timer_base + TIMER0_VAL);
@@ -130,19 +141,30 @@ static void __init orion_timer_init(struct device_node *np)
        atomic_io_modify(timer_base + TIMER_CTRL,
                TIMER0_RELOAD_EN | TIMER0_EN,
                TIMER0_RELOAD_EN | TIMER0_EN);
-       clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
-                             clk_get_rate(clk), 300, 32,
-                             clocksource_mmio_readl_down);
+
+       ret = clocksource_mmio_init(timer_base + TIMER0_VAL, "orion_clocksource",
+                                   clk_get_rate(clk), 300, 32,
+                                   clocksource_mmio_readl_down);
+       if (ret) {
+               pr_err("Failed to initialize mmio timer");
+               return ret;
+       }
+
        sched_clock_register(orion_read_sched_clock, 32, clk_get_rate(clk));
 
        /* setup timer1 as clockevent timer */
-       if (setup_irq(irq, &orion_clkevt_irq))
-               panic("%s: unable to setup irq\n", np->name);
+       ret = setup_irq(irq, &orion_clkevt_irq);
+       if (ret) {
+               pr_err("%s: unable to setup irq\n", np->name);
+               return ret;
+       }
 
        ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
        orion_clkevt.cpumask = cpumask_of(0);
        orion_clkevt.irq = irq;
        clockevents_config_and_register(&orion_clkevt, clk_get_rate(clk),
                                        ORION_ONESHOT_MIN, ORION_ONESHOT_MAX);
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
index 376e59bc5fa06585baa9799558a451e27e3cdd6d..a7d9a08e4b0e31b24640d4e188746aa25139bc1b 100644 (file)
@@ -148,7 +148,7 @@ static struct pistachio_clocksource pcs_gpt = {
                },
 };
 
-static void __init pistachio_clksrc_of_init(struct device_node *node)
+static int __init pistachio_clksrc_of_init(struct device_node *node)
 {
        struct clk *sys_clk, *fast_clk;
        struct regmap *periph_regs;
@@ -158,45 +158,45 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
        pcs_gpt.base = of_iomap(node, 0);
        if (!pcs_gpt.base) {
                pr_err("cannot iomap\n");
-               return;
+               return -ENXIO;
        }
 
        periph_regs = syscon_regmap_lookup_by_phandle(node, "img,cr-periph");
        if (IS_ERR(periph_regs)) {
                pr_err("cannot get peripheral regmap (%ld)\n",
                       PTR_ERR(periph_regs));
-               return;
+               return PTR_ERR(periph_regs);
        }
 
        /* Switch to using the fast counter clock */
        ret = regmap_update_bits(periph_regs, PERIP_TIMER_CONTROL,
                                 0xf, 0x0);
        if (ret)
-               return;
+               return ret;
 
        sys_clk = of_clk_get_by_name(node, "sys");
        if (IS_ERR(sys_clk)) {
                pr_err("clock get failed (%ld)\n", PTR_ERR(sys_clk));
-               return;
+               return PTR_ERR(sys_clk);
        }
 
        fast_clk = of_clk_get_by_name(node, "fast");
        if (IS_ERR(fast_clk)) {
                pr_err("clock get failed (%lu)\n", PTR_ERR(fast_clk));
-               return;
+               return PTR_ERR(fast_clk);
        }
 
        ret = clk_prepare_enable(sys_clk);
        if (ret < 0) {
                pr_err("failed to enable clock (%d)\n", ret);
-               return;
+               return ret;
        }
 
        ret = clk_prepare_enable(fast_clk);
        if (ret < 0) {
                pr_err("failed to enable clock (%d)\n", ret);
                clk_disable_unprepare(sys_clk);
-               return;
+               return ret;
        }
 
        rate = clk_get_rate(fast_clk);
@@ -212,7 +212,7 @@ static void __init pistachio_clksrc_of_init(struct device_node *node)
 
        raw_spin_lock_init(&pcs_gpt.lock);
        sched_clock_register(pistachio_read_sched_clock, 32, rate);
-       clocksource_register_hz(&pcs_gpt.cs, rate);
+       return clocksource_register_hz(&pcs_gpt.cs, rate);
 }
 CLOCKSOURCE_OF_DECLARE(pistachio_gptimer, "img,pistachio-gptimer",
                       pistachio_clksrc_of_init);
index 27fa13680be183e981f524781ae6c122109d2888..90f8fbc154a4f5b1c5bdb98021b0144a75751d9e 100644 (file)
@@ -238,7 +238,7 @@ static struct notifier_block sirfsoc_cpu_nb = {
        .notifier_call = sirfsoc_cpu_notify,
 };
 
-static void __init sirfsoc_clockevent_init(void)
+static int __init sirfsoc_clockevent_init(void)
 {
        sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
        BUG_ON(!sirfsoc_clockevent);
@@ -246,11 +246,11 @@ static void __init sirfsoc_clockevent_init(void)
        BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
 
        /* Immediately configure the timer on the boot CPU */
-       sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+       return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
 }
 
 /* initialize the kernel jiffy timer source */
-static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
+static int __init sirfsoc_atlas7_timer_init(struct device_node *np)
 {
        struct clk *clk;
 
@@ -279,23 +279,29 @@ static void __init sirfsoc_atlas7_timer_init(struct device_node *np)
 
        BUG_ON(clocksource_register_hz(&sirfsoc_clocksource, atlas7_timer_rate));
 
-       sirfsoc_clockevent_init();
+       return sirfsoc_clockevent_init();
 }
 
-static void __init sirfsoc_of_timer_init(struct device_node *np)
+static int __init sirfsoc_of_timer_init(struct device_node *np)
 {
        sirfsoc_timer_base = of_iomap(np, 0);
-       if (!sirfsoc_timer_base)
-               panic("unable to map timer cpu registers\n");
+       if (!sirfsoc_timer_base) {
+               pr_err("unable to map timer cpu registers\n");
+               return -ENXIO;
+       }
 
        sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
-       if (!sirfsoc_timer_irq.irq)
-               panic("No irq passed for timer0 via DT\n");
+       if (!sirfsoc_timer_irq.irq) {
+               pr_err("No irq passed for timer0 via DT\n");
+               return -EINVAL;
+       }
 
        sirfsoc_timer1_irq.irq = irq_of_parse_and_map(np, 1);
-       if (!sirfsoc_timer1_irq.irq)
-               panic("No irq passed for timer1 via DT\n");
+       if (!sirfsoc_timer1_irq.irq) {
+               pr_err("No irq passed for timer1 via DT\n");
+               return -EINVAL;
+       }
 
-       sirfsoc_atlas7_timer_init(np);
+       return sirfsoc_atlas7_timer_init(np);
 }
 CLOCKSOURCE_OF_DECLARE(sirfsoc_atlas7_timer, "sirf,atlas7-tick", sirfsoc_of_timer_init);
index d911c5dca8f17ead5321ced4bd0e0410813fe4ec..1ffac0cb0cb78496d684e987d7b17f3b99a28775 100644 (file)
@@ -177,7 +177,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
 /*
  * Set up both clocksource and clockevent support.
  */
-static void __init at91sam926x_pit_common_init(struct pit_data *data)
+static int __init at91sam926x_pit_common_init(struct pit_data *data)
 {
        unsigned long   pit_rate;
        unsigned        bits;
@@ -204,14 +204,21 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
        data->clksrc.rating = 175;
        data->clksrc.read = read_pit_clk;
        data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
-       clocksource_register_hz(&data->clksrc, pit_rate);
+
+       ret = clocksource_register_hz(&data->clksrc, pit_rate);
+       if (ret) {
+               pr_err("Failed to register clocksource\n");
+               return ret;
+       }
 
        /* Set up irq handler */
        ret = request_irq(data->irq, at91sam926x_pit_interrupt,
                          IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
                          "at91_tick", data);
-       if (ret)
-               panic(pr_fmt("Unable to setup IRQ\n"));
+       if (ret) {
+               pr_err("Unable to setup IRQ\n");
+               return ret;
+       }
 
        /* Set up and register clockevents */
        data->clkevt.name = "pit";
@@ -226,34 +233,42 @@ static void __init at91sam926x_pit_common_init(struct pit_data *data)
        data->clkevt.resume = at91sam926x_pit_resume;
        data->clkevt.suspend = at91sam926x_pit_suspend;
        clockevents_register_device(&data->clkevt);
+
+       return 0;
 }
 
-static void __init at91sam926x_pit_dt_init(struct device_node *node)
+static int __init at91sam926x_pit_dt_init(struct device_node *node)
 {
        struct pit_data *data;
 
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
-               panic(pr_fmt("Unable to allocate memory\n"));
+               return -ENOMEM;
 
        data->base = of_iomap(node, 0);
-       if (!data->base)
-               panic(pr_fmt("Could not map PIT address\n"));
+       if (!data->base) {
+               pr_err("Could not map PIT address\n");
+               return -ENXIO;
+       }
 
        data->mck = of_clk_get(node, 0);
        if (IS_ERR(data->mck))
                /* Fallback on clkdev for !CCF-based boards */
                data->mck = clk_get(NULL, "mck");
 
-       if (IS_ERR(data->mck))
-               panic(pr_fmt("Unable to get mck clk\n"));
+       if (IS_ERR(data->mck)) {
+               pr_err("Unable to get mck clk\n");
+               return PTR_ERR(data->mck);
+       }
 
        /* Get the interrupts property */
        data->irq = irq_of_parse_and_map(node, 0);
-       if (!data->irq)
-               panic(pr_fmt("Unable to get IRQ from DT\n"));
+       if (!data->irq) {
+               pr_err("Unable to get IRQ from DT\n");
+               return -EINVAL;
+       }
 
-       at91sam926x_pit_common_init(data);
+       return at91sam926x_pit_common_init(data);
 }
 CLOCKSOURCE_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
                       at91sam926x_pit_dt_init);
index 29d21d68df5a231d78f586b2ac64ce56ed8e68b1..e90ab5b63a9068ec3901b85cab9b230db67a837c 100644 (file)
@@ -194,15 +194,17 @@ static struct clock_event_device clkevt = {
 /*
  * ST (system timer) module supports both clockevents and clocksource.
  */
-static void __init atmel_st_timer_init(struct device_node *node)
+static int __init atmel_st_timer_init(struct device_node *node)
 {
        struct clk *sclk;
        unsigned int sclk_rate, val;
        int irq, ret;
 
        regmap_st = syscon_node_to_regmap(node);
-       if (IS_ERR(regmap_st))
-               panic(pr_fmt("Unable to get regmap\n"));
+       if (IS_ERR(regmap_st)) {
+               pr_err("Unable to get regmap\n");
+               return PTR_ERR(regmap_st);
+       }
 
        /* Disable all timer interrupts, and clear any pending ones */
        regmap_write(regmap_st, AT91_ST_IDR,
@@ -211,27 +213,37 @@ static void __init atmel_st_timer_init(struct device_node *node)
 
        /* Get the interrupts property */
        irq  = irq_of_parse_and_map(node, 0);
-       if (!irq)
-               panic(pr_fmt("Unable to get IRQ from DT\n"));
+       if (!irq) {
+               pr_err("Unable to get IRQ from DT\n");
+               return -EINVAL;
+       }
 
        /* Make IRQs happen for the system timer */
        ret = request_irq(irq, at91rm9200_timer_interrupt,
                          IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
                          "at91_tick", regmap_st);
-       if (ret)
-               panic(pr_fmt("Unable to setup IRQ\n"));
+       if (ret) {
+               pr_err("Unable to setup IRQ\n");
+               return ret;
+       }
 
        sclk = of_clk_get(node, 0);
-       if (IS_ERR(sclk))
-               panic(pr_fmt("Unable to get slow clock\n"));
+       if (IS_ERR(sclk)) {
+               pr_err("Unable to get slow clock\n");
+               return PTR_ERR(sclk);
+       }
 
-       clk_prepare_enable(sclk);
-       if (ret)
-               panic(pr_fmt("Could not enable slow clock\n"));
+       ret = clk_prepare_enable(sclk);
+       if (ret) {
+               pr_err("Could not enable slow clock\n");
+               return ret;
+       }
 
        sclk_rate = clk_get_rate(sclk);
-       if (!sclk_rate)
-               panic(pr_fmt("Invalid slow clock rate\n"));
+       if (!sclk_rate) {
+               pr_err("Invalid slow clock rate\n");
+               return -EINVAL;
+       }
        timer_latch = (sclk_rate + HZ / 2) / HZ;
 
        /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
@@ -246,7 +258,7 @@ static void __init atmel_st_timer_init(struct device_node *node)
                                        2, AT91_ST_ALMV);
 
        /* register clocksource */
-       clocksource_register_hz(&clk32k, sclk_rate);
+       return clocksource_register_hz(&clk32k, sclk_rate);
 }
 CLOCKSOURCE_OF_DECLARE(atmel_st_timer, "atmel,at91rm9200-st",
                       atmel_st_timer_init);
index a536eeb634d885fccf5b92f0d0cbeeec7f88baf2..10318cc99c0e8f82c5ff9b179a2aacee19b4b7ff 100644 (file)
@@ -63,7 +63,7 @@ struct digicolor_timer {
        int timer_id; /* one of TIMER_* */
 };
 
-struct digicolor_timer *dc_timer(struct clock_event_device *ce)
+static struct digicolor_timer *dc_timer(struct clock_event_device *ce)
 {
        return container_of(ce, struct digicolor_timer, ce);
 }
@@ -148,7 +148,7 @@ static u64 notrace digicolor_timer_sched_read(void)
        return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
 }
 
-static void __init digicolor_timer_init(struct device_node *node)
+static int __init digicolor_timer_init(struct device_node *node)
 {
        unsigned long rate;
        struct clk *clk;
@@ -161,19 +161,19 @@ static void __init digicolor_timer_init(struct device_node *node)
        dc_timer_dev.base = of_iomap(node, 0);
        if (!dc_timer_dev.base) {
                pr_err("Can't map registers");
-               return;
+               return -ENXIO;
        }
 
        irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id);
        if (irq <= 0) {
                pr_err("Can't parse IRQ");
-               return;
+               return -EINVAL;
        }
 
        clk = of_clk_get(node, 0);
        if (IS_ERR(clk)) {
                pr_err("Can't get timer clock");
-               return;
+               return PTR_ERR(clk);
        }
        clk_prepare_enable(clk);
        rate = clk_get_rate(clk);
@@ -190,13 +190,17 @@ static void __init digicolor_timer_init(struct device_node *node)
        ret = request_irq(irq, digicolor_timer_interrupt,
                          IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC",
                          &dc_timer_dev.ce);
-       if (ret)
+       if (ret) {
                pr_warn("request of timer irq %d failed (%d)\n", irq, ret);
+               return ret;
+       }
 
        dc_timer_dev.ce.cpumask = cpu_possible_mask;
        dc_timer_dev.ce.irq = irq;
 
        clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff);
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
                       digicolor_timer_init);
index 99ec96769dda899817bf1ee41fd836a4e2bbbc53..f595460bfc589c51474abcef244663334b0316bd 100644 (file)
@@ -407,8 +407,10 @@ static const struct imx_gpt_data imx6dl_gpt_data = {
        .set_next_event = v2_set_next_event,
 };
 
-static void __init _mxc_timer_init(struct imx_timer *imxtm)
+static int __init _mxc_timer_init(struct imx_timer *imxtm)
 {
+       int ret;
+
        switch (imxtm->type) {
        case GPT_TYPE_IMX1:
                imxtm->gpt = &imx1_gpt_data;
@@ -423,12 +425,12 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
                imxtm->gpt = &imx6dl_gpt_data;
                break;
        default:
-               BUG();
+               return -EINVAL;
        }
 
        if (IS_ERR(imxtm->clk_per)) {
                pr_err("i.MX timer: unable to get clk\n");
-               return;
+               return PTR_ERR(imxtm->clk_per);
        }
 
        if (!IS_ERR(imxtm->clk_ipg))
@@ -446,8 +448,11 @@ static void __init _mxc_timer_init(struct imx_timer *imxtm)
        imxtm->gpt->gpt_setup_tctl(imxtm);
 
        /* init and register the timer to the framework */
-       mxc_clocksource_init(imxtm);
-       mxc_clockevent_init(imxtm);
+       ret = mxc_clocksource_init(imxtm);
+       if (ret)
+               return ret;
+
+       return mxc_clockevent_init(imxtm);
 }
 
 void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
@@ -469,21 +474,27 @@ void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
        _mxc_timer_init(imxtm);
 }
 
-static void __init mxc_timer_init_dt(struct device_node *np,  enum imx_gpt_type type)
+static int __init mxc_timer_init_dt(struct device_node *np,  enum imx_gpt_type type)
 {
        struct imx_timer *imxtm;
        static int initialized;
+       int ret;
 
        /* Support one instance only */
        if (initialized)
-               return;
+               return 0;
 
        imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
-       BUG_ON(!imxtm);
+       if (!imxtm)
+               return -ENOMEM;
 
        imxtm->base = of_iomap(np, 0);
-       WARN_ON(!imxtm->base);
+       if (!imxtm->base)
+               return -ENXIO;
+
        imxtm->irq = irq_of_parse_and_map(np, 0);
+       if (imxtm->irq <= 0)
+               return -EINVAL;
 
        imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");
 
@@ -494,22 +505,26 @@ static void __init mxc_timer_init_dt(struct device_node *np,  enum imx_gpt_type
 
        imxtm->type = type;
 
-       _mxc_timer_init(imxtm);
+       ret = _mxc_timer_init(imxtm);
+       if (ret)
+               return ret;
 
        initialized = 1;
+
+       return 0;
 }
 
-static void __init imx1_timer_init_dt(struct device_node *np)
+static int __init imx1_timer_init_dt(struct device_node *np)
 {
-       mxc_timer_init_dt(np, GPT_TYPE_IMX1);
+       return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
 }
 
-static void __init imx21_timer_init_dt(struct device_node *np)
+static int __init imx21_timer_init_dt(struct device_node *np)
 {
-       mxc_timer_init_dt(np, GPT_TYPE_IMX21);
+       return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
 }
 
-static void __init imx31_timer_init_dt(struct device_node *np)
+static int __init imx31_timer_init_dt(struct device_node *np)
 {
        enum imx_gpt_type type = GPT_TYPE_IMX31;
 
@@ -522,12 +537,12 @@ static void __init imx31_timer_init_dt(struct device_node *np)
        if (of_machine_is_compatible("fsl,imx6dl"))
                type = GPT_TYPE_IMX6DL;
 
-       mxc_timer_init_dt(np, type);
+       return mxc_timer_init_dt(np, type);
 }
 
-static void __init imx6dl_timer_init_dt(struct device_node *np)
+static int __init imx6dl_timer_init_dt(struct device_node *np)
 {
-       mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
+       return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
 }
 
 CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
index 3f59ac2180dc68ac4a519aabe4b25697909e7c52..df6e672afc04c4a41b151b2e2119500e739812fc 100644 (file)
@@ -36,11 +36,12 @@ static u64 notrace integrator_read_sched_clock(void)
        return -readl(sched_clk_base + TIMER_VALUE);
 }
 
-static void integrator_clocksource_init(unsigned long inrate,
-                                       void __iomem *base)
+static int integrator_clocksource_init(unsigned long inrate,
+                                      void __iomem *base)
 {
        u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
        unsigned long rate = inrate;
+       int ret;
 
        if (rate >= 1500000) {
                rate /= 16;
@@ -50,11 +51,15 @@ static void integrator_clocksource_init(unsigned long inrate,
        writel(0xffff, base + TIMER_LOAD);
        writel(ctrl, base + TIMER_CTRL);
 
-       clocksource_mmio_init(base + TIMER_VALUE, "timer2",
-                       rate, 200, 16, clocksource_mmio_readl_down);
+       ret = clocksource_mmio_init(base + TIMER_VALUE, "timer2",
+                                   rate, 200, 16, clocksource_mmio_readl_down);
+       if (ret)
+               return ret;
 
        sched_clk_base = base;
        sched_clock_register(integrator_read_sched_clock, 16, rate);
+
+       return 0;
 }
 
 static unsigned long timer_reload;
@@ -138,11 +143,12 @@ static struct irqaction integrator_timer_irq = {
        .dev_id         = &integrator_clockevent,
 };
 
-static void integrator_clockevent_init(unsigned long inrate,
-                               void __iomem *base, int irq)
+static int integrator_clockevent_init(unsigned long inrate,
+                                     void __iomem *base, int irq)
 {
        unsigned long rate = inrate;
        unsigned int ctrl = 0;
+       int ret;
 
        clkevt_base = base;
        /* Calculate and program a divisor */
@@ -156,14 +162,18 @@ static void integrator_clockevent_init(unsigned long inrate,
        timer_reload = rate / HZ;
        writel(ctrl, clkevt_base + TIMER_CTRL);
 
-       setup_irq(irq, &integrator_timer_irq);
+       ret = setup_irq(irq, &integrator_timer_irq);
+       if (ret)
+               return ret;
+
        clockevents_config_and_register(&integrator_clockevent,
                                        rate,
                                        1,
                                        0xffffU);
+       return 0;
 }
 
-static void __init integrator_ap_timer_init_of(struct device_node *node)
+static int __init integrator_ap_timer_init_of(struct device_node *node)
 {
        const char *path;
        void __iomem *base;
@@ -176,12 +186,12 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
 
        base = of_io_request_and_map(node, 0, "integrator-timer");
        if (IS_ERR(base))
-               return;
+               return PTR_ERR(base);
 
        clk = of_clk_get(node, 0);
        if (IS_ERR(clk)) {
                pr_err("No clock for %s\n", node->name);
-               return;
+               return PTR_ERR(clk);
        }
        clk_prepare_enable(clk);
        rate = clk_get_rate(clk);
@@ -189,30 +199,37 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
 
        err = of_property_read_string(of_aliases,
                                "arm,timer-primary", &path);
-       if (WARN_ON(err))
-               return;
+       if (err) {
+               pr_warn("Failed to read property\n");
+               return err;
+       }
+
        pri_node = of_find_node_by_path(path);
+
        err = of_property_read_string(of_aliases,
                                "arm,timer-secondary", &path);
-       if (WARN_ON(err))
-               return;
+       if (err) {
+               pr_warn("Failed to read property\n");
+               return err;
+       }
+
+
        sec_node = of_find_node_by_path(path);
 
-       if (node == pri_node) {
+       if (node == pri_node)
                /* The primary timer lacks IRQ, use as clocksource */
-               integrator_clocksource_init(rate, base);
-               return;
-       }
+               return integrator_clocksource_init(rate, base);
 
        if (node == sec_node) {
                /* The secondary timer will drive the clock event */
                irq = irq_of_parse_and_map(node, 0);
-               integrator_clockevent_init(rate, base, irq);
-               return;
+               return integrator_clockevent_init(rate, base, irq);
        }
 
        pr_info("Timer @%p unused\n", base);
        clk_disable_unprepare(clk);
+
+       return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(integrator_ap_timer, "arm,integrator-timer",
index 1cea08cf603eb30d5028e157dc8927aef4a004a9..ab68a47ab3b45de8bf8713fcde121403f38fb258 100644 (file)
@@ -144,7 +144,7 @@ static int keystone_set_periodic(struct clock_event_device *evt)
        return 0;
 }
 
-static void __init keystone_timer_init(struct device_node *np)
+static int __init keystone_timer_init(struct device_node *np)
 {
        struct clock_event_device *event_dev = &timer.event_dev;
        unsigned long rate;
@@ -154,20 +154,20 @@ static void __init keystone_timer_init(struct device_node *np)
        irq  = irq_of_parse_and_map(np, 0);
        if (!irq) {
                pr_err("%s: failed to map interrupts\n", __func__);
-               return;
+               return -EINVAL;
        }
 
        timer.base = of_iomap(np, 0);
        if (!timer.base) {
                pr_err("%s: failed to map registers\n", __func__);
-               return;
+               return -ENXIO;
        }
 
        clk = of_clk_get(np, 0);
        if (IS_ERR(clk)) {
                pr_err("%s: failed to get clock\n", __func__);
                iounmap(timer.base);
-               return;
+               return PTR_ERR(clk);
        }
 
        error = clk_prepare_enable(clk);
@@ -219,11 +219,12 @@ static void __init keystone_timer_init(struct device_node *np)
        clockevents_config_and_register(event_dev, rate, 1, ULONG_MAX);
 
        pr_info("keystone timer clock @%lu Hz\n", rate);
-       return;
+       return 0;
 err:
        clk_put(clk);
        iounmap(timer.base);
+       return error;
 }
 
 CLOCKSOURCE_OF_DECLARE(keystone_timer, "ti,keystone-timer",
-                                       keystone_timer_init);
+                          keystone_timer_init);
index d46108920b2c9d497fa360ef07f4060e511e12c2..70c149af8ee0f5e3e861b83da2c3aa8ac78454df 100644 (file)
@@ -55,8 +55,8 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
        return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
 }
 
-static void __init nps_setup_clocksource(struct device_node *node,
-                                        struct clk *clk)
+static int __init nps_setup_clocksource(struct device_node *node,
+                                       struct clk *clk)
 {
        int ret, cluster;
 
@@ -68,7 +68,7 @@ static void __init nps_setup_clocksource(struct device_node *node,
        ret = clk_prepare_enable(clk);
        if (ret) {
                pr_err("Couldn't enable parent clock\n");
-               return;
+               return ret;
        }
 
        nps_timer_rate = clk_get_rate(clk);
@@ -79,19 +79,21 @@ static void __init nps_setup_clocksource(struct device_node *node,
                pr_err("Couldn't register clock source.\n");
                clk_disable_unprepare(clk);
        }
+
+       return ret;
 }
 
-static void __init nps_timer_init(struct device_node *node)
+static int __init nps_timer_init(struct device_node *node)
 {
        struct clk *clk;
 
        clk = of_clk_get(node, 0);
        if (IS_ERR(clk)) {
                pr_err("Can't get timer clock.\n");
-               return;
+               return PTR_ERR(clk);
        }
 
-       nps_setup_clocksource(node, clk);
+       return nps_setup_clocksource(node, clk);
 }
 
 CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
diff --git a/drivers/clocksource/timer-oxnas-rps.c b/drivers/clocksource/timer-oxnas-rps.c
new file mode 100644 (file)
index 0000000..bd887e2
--- /dev/null
@@ -0,0 +1,297 @@
+/*
+ * drivers/clocksource/timer-oxnas-rps.c
+ *
+ * Copyright (C) 2009 Oxford Semiconductor Ltd
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/clockchips.h>
+#include <linux/sched_clock.h>
+
+/* TIMER1 used as tick
+ * TIMER2 used as clocksource
+ */
+
+/* Registers definitions */
+
+#define TIMER_LOAD_REG         0x0
+#define TIMER_CURR_REG         0x4
+#define TIMER_CTRL_REG         0x8
+#define TIMER_CLRINT_REG       0xC
+
+#define TIMER_BITS             24
+
+#define TIMER_MAX_VAL          (BIT(TIMER_BITS) - 1)
+
+#define TIMER_PERIODIC         BIT(6)
+#define TIMER_ENABLE           BIT(7)
+
+#define TIMER_DIV1             (0)
+#define TIMER_DIV16            (1 << 2)
+#define TIMER_DIV256           (2 << 2)
+
+#define TIMER1_REG_OFFSET      0
+#define TIMER2_REG_OFFSET      0x20
+
+/* Clockevent & Clocksource data */
+
+struct oxnas_rps_timer {
+       struct clock_event_device clkevent;
+       void __iomem *clksrc_base;
+       void __iomem *clkevt_base;
+       unsigned long timer_period;
+       unsigned int timer_prescaler;
+       struct clk *clk;
+       int irq;
+};
+
+static irqreturn_t oxnas_rps_timer_irq(int irq, void *dev_id)
+{
+       struct oxnas_rps_timer *rps = dev_id;
+
+       writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
+
+       rps->clkevent.event_handler(&rps->clkevent);
+
+       return IRQ_HANDLED;
+}
+
+static void oxnas_rps_timer_config(struct oxnas_rps_timer *rps,
+                                  unsigned long period,
+                                  unsigned int periodic)
+{
+       uint32_t cfg = rps->timer_prescaler;
+
+       if (period)
+               cfg |= TIMER_ENABLE;
+
+       if (periodic)
+               cfg |= TIMER_PERIODIC;
+
+       writel_relaxed(period, rps->clkevt_base + TIMER_LOAD_REG);
+       writel_relaxed(cfg, rps->clkevt_base + TIMER_CTRL_REG);
+}
+
+static int oxnas_rps_timer_shutdown(struct clock_event_device *evt)
+{
+       struct oxnas_rps_timer *rps =
+               container_of(evt, struct oxnas_rps_timer, clkevent);
+
+       oxnas_rps_timer_config(rps, 0, 0);
+
+       return 0;
+}
+
+static int oxnas_rps_timer_set_periodic(struct clock_event_device *evt)
+{
+       struct oxnas_rps_timer *rps =
+               container_of(evt, struct oxnas_rps_timer, clkevent);
+
+       oxnas_rps_timer_config(rps, rps->timer_period, 1);
+
+       return 0;
+}
+
+static int oxnas_rps_timer_set_oneshot(struct clock_event_device *evt)
+{
+       struct oxnas_rps_timer *rps =
+               container_of(evt, struct oxnas_rps_timer, clkevent);
+
+       oxnas_rps_timer_config(rps, rps->timer_period, 0);
+
+       return 0;
+}
+
+static int oxnas_rps_timer_next_event(unsigned long delta,
+                               struct clock_event_device *evt)
+{
+       struct oxnas_rps_timer *rps =
+               container_of(evt, struct oxnas_rps_timer, clkevent);
+
+       oxnas_rps_timer_config(rps, delta, 0);
+
+       return 0;
+}
+
+static int __init oxnas_rps_clockevent_init(struct oxnas_rps_timer *rps)
+{
+       ulong clk_rate = clk_get_rate(rps->clk);
+       ulong timer_rate;
+
+       /* Start with prescaler 1 */
+       rps->timer_prescaler = TIMER_DIV1;
+       rps->timer_period = DIV_ROUND_UP(clk_rate, HZ);
+       timer_rate = clk_rate;
+
+       if (rps->timer_period > TIMER_MAX_VAL) {
+               rps->timer_prescaler = TIMER_DIV16;
+               timer_rate = clk_rate / 16;
+               rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
+       }
+       if (rps->timer_period > TIMER_MAX_VAL) {
+               rps->timer_prescaler = TIMER_DIV256;
+               timer_rate = clk_rate / 256;
+               rps->timer_period = DIV_ROUND_UP(timer_rate, HZ);
+       }
+
+       rps->clkevent.name = "oxnas-rps";
+       rps->clkevent.features = CLOCK_EVT_FEAT_PERIODIC |
+                                CLOCK_EVT_FEAT_ONESHOT |
+                                CLOCK_EVT_FEAT_DYNIRQ;
+       rps->clkevent.tick_resume = oxnas_rps_timer_shutdown;
+       rps->clkevent.set_state_shutdown = oxnas_rps_timer_shutdown;
+       rps->clkevent.set_state_periodic = oxnas_rps_timer_set_periodic;
+       rps->clkevent.set_state_oneshot = oxnas_rps_timer_set_oneshot;
+       rps->clkevent.set_next_event = oxnas_rps_timer_next_event;
+       rps->clkevent.rating = 200;
+       rps->clkevent.cpumask = cpu_possible_mask;
+       rps->clkevent.irq = rps->irq;
+       clockevents_config_and_register(&rps->clkevent,
+                                       timer_rate,
+                                       1,
+                                       TIMER_MAX_VAL);
+
+       pr_info("Registered clock event rate %luHz prescaler %x period %lu\n",
+                       clk_rate,
+                       rps->timer_prescaler,
+                       rps->timer_period);
+
+       return 0;
+}
+
+/* Clocksource */
+
+static void __iomem *timer_sched_base;
+
+static u64 notrace oxnas_rps_read_sched_clock(void)
+{
+       return ~readl_relaxed(timer_sched_base);
+}
+
+static int __init oxnas_rps_clocksource_init(struct oxnas_rps_timer *rps)
+{
+       ulong clk_rate = clk_get_rate(rps->clk);
+       int ret;
+
+       /* use prescale 16 */
+       clk_rate = clk_rate / 16;
+
+       writel_relaxed(TIMER_MAX_VAL, rps->clksrc_base + TIMER_LOAD_REG);
+       writel_relaxed(TIMER_PERIODIC | TIMER_ENABLE | TIMER_DIV16,
+                       rps->clksrc_base + TIMER_CTRL_REG);
+
+       timer_sched_base = rps->clksrc_base + TIMER_CURR_REG;
+       sched_clock_register(oxnas_rps_read_sched_clock,
+                            TIMER_BITS, clk_rate);
+       ret = clocksource_mmio_init(timer_sched_base,
+                                   "oxnas_rps_clocksource_timer",
+                                   clk_rate, 250, TIMER_BITS,
+                                   clocksource_mmio_readl_down);
+       if (WARN_ON(ret)) {
+               pr_err("can't register clocksource\n");
+               return ret;
+       }
+
+       pr_info("Registered clocksource rate %luHz\n", clk_rate);
+
+       return 0;
+}
+
+static int __init oxnas_rps_timer_init(struct device_node *np)
+{
+       struct oxnas_rps_timer *rps;
+       void __iomem *base;
+       int ret;
+
+       rps = kzalloc(sizeof(*rps), GFP_KERNEL);
+       if (!rps)
+               return -ENOMEM;
+
+       rps->clk = of_clk_get(np, 0);
+       if (IS_ERR(rps->clk)) {
+               ret = PTR_ERR(rps->clk);
+               goto err_alloc;
+       }
+
+       ret = clk_prepare_enable(rps->clk);
+       if (ret)
+               goto err_clk;
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               ret = -ENXIO;
+               goto err_clk_prepare;
+       }
+
+       rps->irq = irq_of_parse_and_map(np, 0);
+       if (rps->irq <= 0) {
+               ret = -EINVAL;
+               goto err_iomap;
+       }
+
+       rps->clkevt_base = base + TIMER1_REG_OFFSET;
+       rps->clksrc_base = base + TIMER2_REG_OFFSET;
+
+       /* Disable timers */
+       writel_relaxed(0, rps->clkevt_base + TIMER_CTRL_REG);
+       writel_relaxed(0, rps->clksrc_base + TIMER_CTRL_REG);
+       writel_relaxed(0, rps->clkevt_base + TIMER_LOAD_REG);
+       writel_relaxed(0, rps->clksrc_base + TIMER_LOAD_REG);
+       writel_relaxed(0, rps->clkevt_base + TIMER_CLRINT_REG);
+       writel_relaxed(0, rps->clksrc_base + TIMER_CLRINT_REG);
+
+       ret = request_irq(rps->irq, oxnas_rps_timer_irq,
+                         IRQF_TIMER | IRQF_IRQPOLL,
+                         "rps-timer", rps);
+       if (ret)
+               goto err_iomap;
+
+       ret = oxnas_rps_clocksource_init(rps);
+       if (ret)
+               goto err_irqreq;
+
+       ret = oxnas_rps_clockevent_init(rps);
+       if (ret)
+               goto err_irqreq;
+
+       return 0;
+
+err_irqreq:
+       free_irq(rps->irq, rps);
+err_iomap:
+       iounmap(base);
+err_clk_prepare:
+       clk_disable_unprepare(rps->clk);
+err_clk:
+       clk_put(rps->clk);
+err_alloc:
+       kfree(rps);
+
+       return ret;
+}
+
+CLOCKSOURCE_OF_DECLARE(ox810se_rps,
+                      "oxsemi,ox810se-rps-timer", oxnas_rps_timer_init);
index 2854c663e8b5b978ae463d1d96c218c79da77a5e..c32148ec7a384dc58921c40515786491033ff5e8 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 #include <linux/sched_clock.h>
-#include <asm/mach/time.h>
 
 #define PRIMA2_CLOCK_FREQ 1000000
 
@@ -189,24 +188,36 @@ static void __init sirfsoc_clockevent_init(void)
 }
 
 /* initialize the kernel jiffy timer source */
-static void __init sirfsoc_prima2_timer_init(struct device_node *np)
+static int __init sirfsoc_prima2_timer_init(struct device_node *np)
 {
        unsigned long rate;
        struct clk *clk;
+       int ret;
 
        clk = of_clk_get(np, 0);
-       BUG_ON(IS_ERR(clk));
+       if (IS_ERR(clk)) {
+               pr_err("Failed to get clock\n");
+               return PTR_ERR(clk);
+       }
 
-       BUG_ON(clk_prepare_enable(clk));
+       ret = clk_prepare_enable(clk);
+       if (ret) {
+               pr_err("Failed to enable clock\n");
+               return ret;
+       }
 
        rate = clk_get_rate(clk);
 
-       BUG_ON(rate < PRIMA2_CLOCK_FREQ);
-       BUG_ON(rate % PRIMA2_CLOCK_FREQ);
+       if (rate < PRIMA2_CLOCK_FREQ || rate % PRIMA2_CLOCK_FREQ) {
+               pr_err("Invalid clock rate\n");
+               return -EINVAL;
+       }
 
        sirfsoc_timer_base = of_iomap(np, 0);
-       if (!sirfsoc_timer_base)
-               panic("unable to map timer cpu registers\n");
+       if (!sirfsoc_timer_base) {
+               pr_err("unable to map timer cpu registers\n");
+               return -ENXIO;
+       }
 
        sirfsoc_timer_irq.irq = irq_of_parse_and_map(np, 0);
 
@@ -216,14 +227,23 @@ static void __init sirfsoc_prima2_timer_init(struct device_node *np)
        writel_relaxed(0, sirfsoc_timer_base + SIRFSOC_TIMER_COUNTER_HI);
        writel_relaxed(BIT(0), sirfsoc_timer_base + SIRFSOC_TIMER_STATUS);
 
-       BUG_ON(clocksource_register_hz(&sirfsoc_clocksource,
-                                      PRIMA2_CLOCK_FREQ));
+       ret = clocksource_register_hz(&sirfsoc_clocksource, PRIMA2_CLOCK_FREQ);
+       if (ret) {
+               pr_err("Failed to register clocksource\n");
+               return ret;
+       }
 
        sched_clock_register(sirfsoc_read_sched_clock, 64, PRIMA2_CLOCK_FREQ);
 
-       BUG_ON(setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq));
+       ret = setup_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
+       if (ret) {
+               pr_err("Failed to setup irq\n");
+               return ret;
+       }
 
        sirfsoc_clockevent_init();
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(sirfsoc_prima2_timer,
        "sirf,prima2-tick", sirfsoc_prima2_timer_init);
index 5f45b9adef6058b0ec78c31637585ee2c9cdfbc1..d07863388e05e6920e4fe2fd3e1fc171f3b38bdc 100644 (file)
@@ -77,7 +77,7 @@ void __init sp804_timer_disable(void __iomem *base)
        writel(0, base + TIMER_CTRL);
 }
 
-void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
+int  __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
                                                     const char *name,
                                                     struct clk *clk,
                                                     int use_sched_clock)
@@ -89,14 +89,13 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
                if (IS_ERR(clk)) {
                        pr_err("sp804: clock not found: %d\n",
                               (int)PTR_ERR(clk));
-                       return;
+                       return PTR_ERR(clk);
                }
        }
 
        rate = sp804_get_clock_rate(clk);
-
        if (rate < 0)
-               return;
+               return -EINVAL;
 
        /* setup timer 0 as free-running clocksource */
        writel(0, base + TIMER_CTRL);
@@ -112,6 +111,8 @@ void __init __sp804_clocksource_and_sched_clock_init(void __iomem *base,
                sched_clock_base = base;
                sched_clock_register(sp804_read, 32, rate);
        }
+
+       return 0;
 }
 
 
@@ -186,7 +187,7 @@ static struct irqaction sp804_timer_irq = {
        .dev_id         = &sp804_clockevent,
 };
 
-void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
+int __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struct clk *clk, const char *name)
 {
        struct clock_event_device *evt = &sp804_clockevent;
        long rate;
@@ -196,12 +197,12 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
        if (IS_ERR(clk)) {
                pr_err("sp804: %s clock not found: %d\n", name,
                        (int)PTR_ERR(clk));
-               return;
+               return PTR_ERR(clk);
        }
 
        rate = sp804_get_clock_rate(clk);
        if (rate < 0)
-               return;
+               return -EINVAL;
 
        clkevt_base = base;
        clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
@@ -213,27 +214,31 @@ void __init __sp804_clockevents_init(void __iomem *base, unsigned int irq, struc
 
        setup_irq(irq, &sp804_timer_irq);
        clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
+
+       return 0;
 }
 
-static void __init sp804_of_init(struct device_node *np)
+static int __init sp804_of_init(struct device_node *np)
 {
        static bool initialized = false;
        void __iomem *base;
-       int irq;
+       int irq, ret = -EINVAL;
        u32 irq_num = 0;
        struct clk *clk1, *clk2;
        const char *name = of_get_property(np, "compatible", NULL);
 
        base = of_iomap(np, 0);
-       if (WARN_ON(!base))
-               return;
+       if (!base)
+               return -ENXIO;
 
        /* Ensure timers are disabled */
        writel(0, base + TIMER_CTRL);
        writel(0, base + TIMER_2_BASE + TIMER_CTRL);
 
-       if (initialized || !of_device_is_available(np))
+       if (initialized || !of_device_is_available(np)) {
+               ret = -EINVAL;
                goto err;
+       }
 
        clk1 = of_clk_get(np, 0);
        if (IS_ERR(clk1))
@@ -256,35 +261,53 @@ static void __init sp804_of_init(struct device_node *np)
 
        of_property_read_u32(np, "arm,sp804-has-irq", &irq_num);
        if (irq_num == 2) {
-               __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
-               __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+
+               ret = __sp804_clockevents_init(base + TIMER_2_BASE, irq, clk2, name);
+               if (ret)
+                       goto err;
+
+               ret = __sp804_clocksource_and_sched_clock_init(base, name, clk1, 1);
+               if (ret)
+                       goto err;
        } else {
-               __sp804_clockevents_init(base, irq, clk1 , name);
-               __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
-                                                        name, clk2, 1);
+
+               ret = __sp804_clockevents_init(base, irq, clk1 , name);
+               if (ret)
+                       goto err;
+
+               ret = __sp804_clocksource_and_sched_clock_init(base + TIMER_2_BASE,
+                                                              name, clk2, 1);
+               if (ret)
+                       goto err;
        }
        initialized = true;
 
-       return;
+       return 0;
 err:
        iounmap(base);
+       return ret;
 }
 CLOCKSOURCE_OF_DECLARE(sp804, "arm,sp804", sp804_of_init);
 
-static void __init integrator_cp_of_init(struct device_node *np)
+static int __init integrator_cp_of_init(struct device_node *np)
 {
        static int init_count = 0;
        void __iomem *base;
-       int irq;
+       int irq, ret = -EINVAL;
        const char *name = of_get_property(np, "compatible", NULL);
        struct clk *clk;
 
        base = of_iomap(np, 0);
-       if (WARN_ON(!base))
-               return;
+       if (!base) {
+               pr_err("Failed to iomap\n");
+               return -ENXIO;
+       }
+
        clk = of_clk_get(np, 0);
-       if (WARN_ON(IS_ERR(clk)))
-               return;
+       if (IS_ERR(clk)) {
+               pr_err("Failed to get clock\n");
+               return PTR_ERR(clk);
+       }
 
        /* Ensure timer is disabled */
        writel(0, base + TIMER_CTRL);
@@ -292,19 +315,24 @@ static void __init integrator_cp_of_init(struct device_node *np)
        if (init_count == 2 || !of_device_is_available(np))
                goto err;
 
-       if (!init_count)
-               __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
-       else {
+       if (!init_count) {
+               ret = __sp804_clocksource_and_sched_clock_init(base, name, clk, 0);
+               if (ret)
+                       goto err;
+       } else {
                irq = irq_of_parse_and_map(np, 0);
                if (irq <= 0)
                        goto err;
 
-               __sp804_clockevents_init(base, irq, clk, name);
+               ret = __sp804_clockevents_init(base, irq, clk, name);
+               if (ret)
+                       goto err;
        }
 
        init_count++;
-       return;
+       return 0;
 err:
        iounmap(base);
+       return ret;
 }
 CLOCKSOURCE_OF_DECLARE(intcp, "arm,integrator-cp-timer", integrator_cp_of_init);
index f3dcb76799b4e419e9dd72f21d25e73d7554dbfc..1b2574c4fb979e4a2597e5ef9e86b9177fcf6b98 100644 (file)
@@ -98,7 +98,7 @@ static struct stm32_clock_event_ddata clock_event_ddata = {
        },
 };
 
-static void __init stm32_clockevent_init(struct device_node *np)
+static int __init stm32_clockevent_init(struct device_node *np)
 {
        struct stm32_clock_event_ddata *data = &clock_event_ddata;
        struct clk *clk;
@@ -130,12 +130,14 @@ static void __init stm32_clockevent_init(struct device_node *np)
 
        data->base = of_iomap(np, 0);
        if (!data->base) {
+               ret = -ENXIO;
                pr_err("failed to map registers for clockevent\n");
                goto err_iomap;
        }
 
        irq = irq_of_parse_and_map(np, 0);
        if (!irq) {
+               ret = -EINVAL;
                pr_err("%s: failed to get irq.\n", np->full_name);
                goto err_get_irq;
        }
@@ -173,7 +175,7 @@ static void __init stm32_clockevent_init(struct device_node *np)
        pr_info("%s: STM32 clockevent driver initialized (%d bits)\n",
                        np->full_name, bits);
 
-       return;
+       return ret;
 
 err_get_irq:
        iounmap(data->base);
@@ -182,7 +184,7 @@ err_iomap:
 err_clk_enable:
        clk_put(clk);
 err_clk_get:
-       return;
+       return ret;
 }
 
 CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
index 24c83f9efd87f5f5df361eb9e4b0ecd76b6827f9..c184eb84101e9f9a72354baf4c87c4d2ada92d19 100644 (file)
@@ -311,33 +311,42 @@ err_free:
        return ret;
 }
 
-static void __init sun5i_timer_init(struct device_node *node)
+static int __init sun5i_timer_init(struct device_node *node)
 {
        struct reset_control *rstc;
        void __iomem *timer_base;
        struct clk *clk;
-       int irq;
+       int irq, ret;
 
        timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (IS_ERR(timer_base))
-               panic("Can't map registers");
+       if (IS_ERR(timer_base)) {
+               pr_err("Can't map registers\n");
+               return PTR_ERR(timer_base);
+       }
 
        irq = irq_of_parse_and_map(node, 0);
-       if (irq <= 0)
-               panic("Can't parse IRQ");
+       if (irq <= 0) {
+               pr_err("Can't parse IRQ\n");
+               return -EINVAL;
+       }
 
        clk = of_clk_get(node, 0);
-       if (IS_ERR(clk))
-               panic("Can't get timer clock");
+       if (IS_ERR(clk)) {
+               pr_err("Can't get timer clock\n");
+               return PTR_ERR(clk);
+       }
 
        rstc = of_reset_control_get(node, NULL);
        if (!IS_ERR(rstc))
                reset_control_deassert(rstc);
 
-       sun5i_setup_clocksource(node, timer_base, clk, irq);
-       sun5i_setup_clockevent(node, timer_base, clk, irq);
+       ret = sun5i_setup_clocksource(node, timer_base, clk, irq);
+       if (ret)
+               return ret;
+
+       return sun5i_setup_clockevent(node, timer_base, clk, irq);
 }
 CLOCKSOURCE_OF_DECLARE(sun5i_a13, "allwinner,sun5i-a13-hstimer",
-                      sun5i_timer_init);
+                          sun5i_timer_init);
 CLOCKSOURCE_OF_DECLARE(sun7i_a20, "allwinner,sun7i-a20-hstimer",
-                      sun5i_timer_init);
+                          sun5i_timer_init);
index 8518d9dfba5c40b24cd6980c4d9ca00c3377c6ca..92b7e390f6c893bbbdb65af18163419c7820de05 100644 (file)
@@ -88,14 +88,14 @@ static u64 notrace omap_32k_read_sched_clock(void)
        return ti_32k_read_cycles(&ti_32k_timer.cs);
 }
 
-static void __init ti_32k_timer_init(struct device_node *np)
+static int __init ti_32k_timer_init(struct device_node *np)
 {
        int ret;
 
        ti_32k_timer.base = of_iomap(np, 0);
        if (!ti_32k_timer.base) {
                pr_err("Can't ioremap 32k timer base\n");
-               return;
+               return -ENXIO;
        }
 
        ti_32k_timer.counter = ti_32k_timer.base;
@@ -116,11 +116,13 @@ static void __init ti_32k_timer_init(struct device_node *np)
        ret = clocksource_register_hz(&ti_32k_timer.cs, 32768);
        if (ret) {
                pr_err("32k_counter: can't register clocksource\n");
-               return;
+               return ret;
        }
 
        sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
        pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(ti_32k_timer, "ti,omap-counter32k",
                ti_32k_timer_init);
index 1744b243898acc2f4e579c0117f566d11d9d30fe..704e40c6f151307ddaee42c16c869446513fa8d5 100644 (file)
@@ -359,27 +359,37 @@ static struct delay_timer u300_delay_timer;
 /*
  * This sets up the system timers, clock source and clock event.
  */
-static void __init u300_timer_init_of(struct device_node *np)
+static int __init u300_timer_init_of(struct device_node *np)
 {
        unsigned int irq;
        struct clk *clk;
        unsigned long rate;
+       int ret;
 
        u300_timer_base = of_iomap(np, 0);
-       if (!u300_timer_base)
-               panic("could not ioremap system timer\n");
+       if (!u300_timer_base) {
+               pr_err("could not ioremap system timer\n");
+               return -ENXIO;
+       }
 
        /* Get the IRQ for the GP1 timer */
        irq = irq_of_parse_and_map(np, 2);
-       if (!irq)
-               panic("no IRQ for system timer\n");
+       if (!irq) {
+               pr_err("no IRQ for system timer\n");
+               return -EINVAL;
+       }
 
        pr_info("U300 GP1 timer @ base: %p, IRQ: %u\n", u300_timer_base, irq);
 
        /* Clock the interrupt controller */
        clk = of_clk_get(np, 0);
-       BUG_ON(IS_ERR(clk));
-       clk_prepare_enable(clk);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       ret = clk_prepare_enable(clk);
+       if (ret)
+               return ret;
+
        rate = clk_get_rate(clk);
 
        u300_clockevent_data.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ);
@@ -410,7 +420,9 @@ static void __init u300_timer_init_of(struct device_node *np)
                u300_timer_base + U300_TIMER_APP_RGPT1);
 
        /* Set up the IRQ handler */
-       setup_irq(irq, &u300_timer_irq);
+       ret = setup_irq(irq, &u300_timer_irq);
+       if (ret)
+               return ret;
 
        /* Reset the General Purpose timer 2 */
        writel(U300_TIMER_APP_RGPT2_TIMER_RESET,
@@ -428,9 +440,12 @@ static void __init u300_timer_init_of(struct device_node *np)
                u300_timer_base + U300_TIMER_APP_EGPT2);
 
        /* Use general purpose timer 2 as clock source */
-       if (clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
-                       "GPT2", rate, 300, 32, clocksource_mmio_readl_up))
+       ret = clocksource_mmio_init(u300_timer_base + U300_TIMER_APP_GPT2CC,
+                                   "GPT2", rate, 300, 32, clocksource_mmio_readl_up);
+       if (ret) {
                pr_err("timer: failed to initialize U300 clock source\n");
+               return ret;
+       }
 
        /* Configure and register the clockevent */
        clockevents_config_and_register(&u300_clockevent_data.cevd, rate,
@@ -440,6 +455,7 @@ static void __init u300_timer_init_of(struct device_node *np)
         * TODO: init and register the rest of the timers too, they can be
         * used by hrtimers!
         */
+       return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(u300_timer, "stericsson,u300-apptimer",
index 0a26d3dde6c005dd80c7a2af23aadb9b81bbe62d..220b490a81428ef8477b3140d49cc85919dbdc10 100644 (file)
@@ -25,16 +25,18 @@ static u64 notrace versatile_sys_24mhz_read(void)
        return readl(versatile_sys_24mhz);
 }
 
-static void __init versatile_sched_clock_init(struct device_node *node)
+static int __init versatile_sched_clock_init(struct device_node *node)
 {
        void __iomem *base = of_iomap(node, 0);
 
        if (!base)
-               return;
+               return -ENXIO;
 
        versatile_sys_24mhz = base + SYS_24MHZ;
 
        sched_clock_register(versatile_sys_24mhz_read, 32, 24000000);
+
+       return 0;
 }
 CLOCKSOURCE_OF_DECLARE(vexpress, "arm,vexpress-sysreg",
                       versatile_sched_clock_init);
index a0e6c68536a18d8dbc76eb7f239bea31ca24240f..55d8d8402d903dcb39eb5ad40b8ba1f285de1596 100644 (file)
@@ -156,15 +156,18 @@ static int __init pit_clockevent_init(unsigned long rate, int irq)
        return 0;
 }
 
-static void __init pit_timer_init(struct device_node *np)
+static int __init pit_timer_init(struct device_node *np)
 {
        struct clk *pit_clk;
        void __iomem *timer_base;
        unsigned long clk_rate;
-       int irq;
+       int irq, ret;
 
        timer_base = of_iomap(np, 0);
-       BUG_ON(!timer_base);
+       if (!timer_base) {
+               pr_err("Failed to iomap\n");
+               return -ENXIO;
+       }
 
        /*
         * PIT0 and PIT1 can be chained to build a 64-bit timer,
@@ -175,12 +178,16 @@ static void __init pit_timer_init(struct device_node *np)
        clkevt_base = timer_base + PITn_OFFSET(3);
 
        irq = irq_of_parse_and_map(np, 0);
-       BUG_ON(irq <= 0);
+       if (irq <= 0)
+               return -EINVAL;
 
        pit_clk = of_clk_get(np, 0);
-       BUG_ON(IS_ERR(pit_clk));
+       if (IS_ERR(pit_clk))
+               return PTR_ERR(pit_clk);
 
-       BUG_ON(clk_prepare_enable(pit_clk));
+       ret = clk_prepare_enable(pit_clk);
+       if (ret)
+               return ret;
 
        clk_rate = clk_get_rate(pit_clk);
        cycle_per_jiffy = clk_rate / (HZ);
@@ -188,8 +195,10 @@ static void __init pit_timer_init(struct device_node *np)
        /* enable the pit module */
        __raw_writel(~PITMCR_MDIS, timer_base + PITMCR);
 
-       BUG_ON(pit_clocksource_init(clk_rate));
+       ret = pit_clocksource_init(clk_rate);
+       if (ret)
+               return ret;
 
-       pit_clockevent_init(clk_rate, irq);
+       return pit_clockevent_init(clk_rate, irq);
 }
 CLOCKSOURCE_OF_DECLARE(vf610, "fsl,vf610-pit", pit_timer_init);
index ddb409274f45a72a6b3af5289ee1779396523f73..b15069483fbde1b655fa0932b10a6d6be4fa3d44 100644 (file)
@@ -121,38 +121,48 @@ static struct irqaction irq = {
        .dev_id  = &clockevent,
 };
 
-static void __init vt8500_timer_init(struct device_node *np)
+static int __init vt8500_timer_init(struct device_node *np)
 {
-       int timer_irq;
+       int timer_irq, ret;
 
        regbase = of_iomap(np, 0);
        if (!regbase) {
                pr_err("%s: Missing iobase description in Device Tree\n",
                                                                __func__);
-               return;
+               return -ENXIO;
        }
+
        timer_irq = irq_of_parse_and_map(np, 0);
        if (!timer_irq) {
                pr_err("%s: Missing irq description in Device Tree\n",
                                                                __func__);
-               return;
+               return -EINVAL;
        }
 
        writel(1, regbase + TIMER_CTRL_VAL);
        writel(0xf, regbase + TIMER_STATUS_VAL);
        writel(~0, regbase + TIMER_MATCH_VAL);
 
-       if (clocksource_register_hz(&clocksource, VT8500_TIMER_HZ))
+       ret = clocksource_register_hz(&clocksource, VT8500_TIMER_HZ);
+       if (ret) {
                pr_err("%s: vt8500_timer_init: clocksource_register failed for %s\n",
-                                       __func__, clocksource.name);
+                      __func__, clocksource.name);
+               return ret;
+       }
 
        clockevent.cpumask = cpumask_of(0);
 
-       if (setup_irq(timer_irq, &irq))
+       ret = setup_irq(timer_irq, &irq);
+       if (ret) {
                pr_err("%s: setup_irq failed for %s\n", __func__,
                                                        clockevent.name);
+               return ret;
+       }
+
        clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
                                        MIN_OSCR_DELTA * 2, 0xf0000000);
+
+       return 0;
 }
 
 CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
index ceaa6133f9c2872e5abb2c35d442d0ff9821134f..9a53f5ef61571613ff65972a86e8d253ada7340e 100644 (file)
@@ -210,9 +210,9 @@ error_free:
        return ret;
 }
 
-static void __init zevio_timer_init(struct device_node *node)
+static int __init zevio_timer_init(struct device_node *node)
 {
-       BUG_ON(zevio_timer_add(node));
+       return zevio_timer_add(node);
 }
 
 CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
index 1fa1a32928d70a30050b30ca2d6eef4aea62d8e3..28690b284846f50ce87e2b1dd56a897bed36d3a7 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/msr.h>
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
+#include <asm/intel-family.h>
 
 #define ATOM_RATIOS            0x66a
 #define ATOM_VIDS              0x66b
@@ -1334,29 +1335,29 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
                        (unsigned long)&policy }
 
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
-       ICPU(0x2a, core_params),
-       ICPU(0x2d, core_params),
-       ICPU(0x37, silvermont_params),
-       ICPU(0x3a, core_params),
-       ICPU(0x3c, core_params),
-       ICPU(0x3d, core_params),
-       ICPU(0x3e, core_params),
-       ICPU(0x3f, core_params),
-       ICPU(0x45, core_params),
-       ICPU(0x46, core_params),
-       ICPU(0x47, core_params),
-       ICPU(0x4c, airmont_params),
-       ICPU(0x4e, core_params),
-       ICPU(0x4f, core_params),
-       ICPU(0x5e, core_params),
-       ICPU(0x56, core_params),
-       ICPU(0x57, knl_params),
+       ICPU(INTEL_FAM6_SANDYBRIDGE,            core_params),
+       ICPU(INTEL_FAM6_SANDYBRIDGE_X,          core_params),
+       ICPU(INTEL_FAM6_ATOM_SILVERMONT1,       silvermont_params),
+       ICPU(INTEL_FAM6_IVYBRIDGE,              core_params),
+       ICPU(INTEL_FAM6_HASWELL_CORE,           core_params),
+       ICPU(INTEL_FAM6_BROADWELL_CORE,         core_params),
+       ICPU(INTEL_FAM6_IVYBRIDGE_X,            core_params),
+       ICPU(INTEL_FAM6_HASWELL_X,              core_params),
+       ICPU(INTEL_FAM6_HASWELL_ULT,            core_params),
+       ICPU(INTEL_FAM6_HASWELL_GT3E,           core_params),
+       ICPU(INTEL_FAM6_BROADWELL_GT3E,         core_params),
+       ICPU(INTEL_FAM6_ATOM_AIRMONT,           airmont_params),
+       ICPU(INTEL_FAM6_SKYLAKE_MOBILE,         core_params),
+       ICPU(INTEL_FAM6_BROADWELL_X,            core_params),
+       ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,        core_params),
+       ICPU(INTEL_FAM6_BROADWELL_XEON_D,       core_params),
+       ICPU(INTEL_FAM6_XEON_PHI_KNL,           knl_params),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
-       ICPU(0x56, core_params),
+       ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_params),
        {}
 };
 
index 54c45368e3f17d3333a124cbb378ccf0fefee9fe..6bd715b7f11c71373adbaa9e285b992a55e45d2c 100644 (file)
@@ -530,8 +530,7 @@ static inline void  queue_gpstate_timer(struct global_pstate_info *gpstates)
        else
                timer_interval = GPSTATE_TIMER_INTERVAL;
 
-       mod_timer_pinned(&gpstates->timer, jiffies +
-                       msecs_to_jiffies(timer_interval));
+       mod_timer(&gpstates->timer, jiffies + msecs_to_jiffies(timer_interval));
 }
 
 /**
@@ -699,7 +698,7 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
        policy->driver_data = gpstates;
 
        /* initialize timer */
-       init_timer_deferrable(&gpstates->timer);
+       init_timer_pinned_deferrable(&gpstates->timer);
        gpstates->timer.data = (unsigned long)policy;
        gpstates->timer.function = gpstate_timer_handler;
        gpstates->timer.expires = jiffies +
index a4d0059e232cbd22478d29d92370eb027e8bde79..c73207abb5a44a77ed0819101ab75cd626ceb353 100644 (file)
@@ -173,7 +173,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 
        struct cpuidle_state *target_state = &drv->states[index];
        bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
-       u64 time_start, time_end;
+       ktime_t time_start, time_end;
        s64 diff;
 
        /*
@@ -195,13 +195,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        sched_idle_set_state(target_state);
 
        trace_cpu_idle_rcuidle(index, dev->cpu);
-       time_start = local_clock();
+       time_start = ns_to_ktime(local_clock());
 
        stop_critical_timings();
        entered_state = target_state->enter(dev, drv, index);
        start_critical_timings();
 
-       time_end = local_clock();
+       time_end = ns_to_ktime(local_clock());
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
        /* The cpu is no longer idle or about to enter idle. */
@@ -217,11 +217,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
        if (!cpuidle_state_is_coupled(drv, index))
                local_irq_enable();
 
-       /*
-        * local_clock() returns the time in nanosecond, let's shift
-        * by 10 (divide by 1024) to have microsecond based time.
-        */
-       diff = (time_end - time_start) >> 10;
+       diff = ktime_us_delta(time_end, time_start);
        if (diff > INT_MAX)
                diff = INT_MAX;
 
index 6d74b91f2152807a0f65470dd799aa5080a3c111..5fc3dbb9ada0240011f58fa072e3015fb69bea7e 100644 (file)
@@ -2,6 +2,7 @@ $(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
                             $(obj)/qat_rsapubkey-asn1.h
 $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
                              $(obj)/qat_rsaprivkey-asn1.h
+$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
 
 clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
 clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
index f8c5cd53307c2c5993add0114720bd8b005e4f21..c5f21efd6090ee35b2cbad7e4ceeb59a391ecc8a 100644 (file)
@@ -126,28 +126,33 @@ static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
        hsu_dma_start_channel(hsuc);
 }
 
-static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
-{
-       unsigned long flags;
-       u32 sr;
-
-       spin_lock_irqsave(&hsuc->vchan.lock, flags);
-       sr = hsu_chan_readl(hsuc, HSU_CH_SR);
-       spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
-
-       return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
-}
-
-irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
+/*
+ *      hsu_dma_get_status() - get DMA channel status
+ *      @chip: HSUART DMA chip
+ *      @nr: DMA channel number
+ *      @status: pointer for DMA Channel Status Register value
+ *
+ *      Description:
+ *      The function reads and clears the DMA Channel Status Register, checks
+ *      if it was a timeout interrupt and returns a corresponding value.
+ *
+ *      Caller should provide a valid pointer for the DMA Channel Status
+ *      Register value that will be returned in @status.
+ *
+ *      Return:
+ *      1 for DMA timeout status, 0 for other DMA status, or error code for
+ *      invalid parameters or no interrupt pending.
+ */
+int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
+                      u32 *status)
 {
        struct hsu_dma_chan *hsuc;
-       struct hsu_dma_desc *desc;
        unsigned long flags;
        u32 sr;
 
        /* Sanity check */
        if (nr >= chip->hsu->nr_channels)
-               return IRQ_NONE;
+               return -EINVAL;
 
        hsuc = &chip->hsu->chan[nr];
 
@@ -155,22 +160,65 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
         * No matter what situation, need read clear the IRQ status
         * There is a bug, see Errata 5, HSD 2900918
         */
-       sr = hsu_dma_chan_get_sr(hsuc);
+       spin_lock_irqsave(&hsuc->vchan.lock, flags);
+       sr = hsu_chan_readl(hsuc, HSU_CH_SR);
+       spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
+
+       /* Check if any interrupt is pending */
+       sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
        if (!sr)
-               return IRQ_NONE;
+               return -EIO;
 
        /* Timeout IRQ, need wait some time, see Errata 2 */
        if (sr & HSU_CH_SR_DESCTO_ANY)
                udelay(2);
 
+       /*
+        * At this point, at least one of Descriptor Time Out, Channel Error
+        * or Descriptor Done bits must be set. Clear the Descriptor Time Out
+        * bits and if sr is still non-zero, it must be channel error or
+        * descriptor done which are higher priority than timeout and handled
+        * in hsu_dma_do_irq(). Else, it must be a timeout.
+        */
        sr &= ~HSU_CH_SR_DESCTO_ANY;
-       if (!sr)
-               return IRQ_HANDLED;
+
+       *status = sr;
+
+       return sr ? 0 : 1;
+}
+EXPORT_SYMBOL_GPL(hsu_dma_get_status);
+
+/*
+ *      hsu_dma_do_irq() - DMA interrupt handler
+ *      @chip: HSUART DMA chip
+ *      @nr: DMA channel number
+ *      @status: Channel Status Register value
+ *
+ *      Description:
+ *      This function handles Channel Error and Descriptor Done interrupts.
+ *      This function should be called after determining that the DMA interrupt
+ *      is not a normal timeout interrupt, ie. hsu_dma_get_status() returned 0.
+ *
+ *      Return:
+ *      IRQ_NONE for invalid channel number, IRQ_HANDLED otherwise.
+ */
+irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
+                          u32 status)
+{
+       struct hsu_dma_chan *hsuc;
+       struct hsu_dma_desc *desc;
+       unsigned long flags;
+
+       /* Sanity check */
+       if (nr >= chip->hsu->nr_channels)
+               return IRQ_NONE;
+
+       hsuc = &chip->hsu->chan[nr];
 
        spin_lock_irqsave(&hsuc->vchan.lock, flags);
        desc = hsuc->desc;
        if (desc) {
-               if (sr & HSU_CH_SR_CHE) {
+               if (status & HSU_CH_SR_CHE) {
                        desc->status = DMA_ERROR;
                } else if (desc->active < desc->nents) {
                        hsu_dma_start_channel(hsuc);
@@ -184,7 +232,7 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
 
        return IRQ_HANDLED;
 }
-EXPORT_SYMBOL_GPL(hsu_dma_irq);
+EXPORT_SYMBOL_GPL(hsu_dma_do_irq);
 
 static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents)
 {
index e2db76bd56d89e9118308cb2dfb5d03b64811c0a..9916058531d93945a4534145ffdee75d4f6be6e2 100644 (file)
@@ -27,13 +27,20 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev)
 {
        struct hsu_dma_chip *chip = dev;
        u32 dmaisr;
+       u32 status;
        unsigned short i;
        irqreturn_t ret = IRQ_NONE;
+       int err;
 
        dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
        for (i = 0; i < chip->hsu->nr_channels; i++) {
-               if (dmaisr & 0x1)
-                       ret |= hsu_dma_irq(chip, i);
+               if (dmaisr & 0x1) {
+                       err = hsu_dma_get_status(chip, i, &status);
+                       if (err > 0)
+                               ret |= IRQ_HANDLED;
+                       else if (err == 0)
+                               ret |= hsu_dma_do_irq(chip, i, status);
+               }
                dmaisr >>= 1;
        }
 
index 6744d88bdea89782c524ede7d843c3d6bcd67ee2..4fb2eb7c800d8839c6329cd34589eeea4dbaa5c0 100644 (file)
@@ -2378,22 +2378,19 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
  * @num_mc: pointer to the memory controllers count, to be incremented in case
  *         of success.
  * @table: model specific table
- * @allow_dups: allow for multiple devices to exist with the same device id
- *              (as implemented, this isn't expected to work correctly in the
- *              multi-socket case).
- * @multi_bus: don't assume devices on different buses belong to different
- *             memory controllers.
  *
  * returns 0 in case of success or error code
  */
-static int sbridge_get_all_devices_full(u8 *num_mc,
-                                       const struct pci_id_table *table,
-                                       int allow_dups,
-                                       int multi_bus)
+static int sbridge_get_all_devices(u8 *num_mc,
+                                       const struct pci_id_table *table)
 {
        int i, rc;
        struct pci_dev *pdev = NULL;
+       int allow_dups = 0;
+       int multi_bus = 0;
 
+       if (table->type == KNIGHTS_LANDING)
+               allow_dups = multi_bus = 1;
        while (table && table->descr) {
                for (i = 0; i < table->n_devs; i++) {
                        if (!allow_dups || i == 0 ||
@@ -2420,11 +2417,6 @@ static int sbridge_get_all_devices_full(u8 *num_mc,
        return 0;
 }
 
-#define sbridge_get_all_devices(num_mc, table) \
-               sbridge_get_all_devices_full(num_mc, table, 0, 0)
-#define sbridge_get_all_devices_knl(num_mc, table) \
-               sbridge_get_all_devices_full(num_mc, table, 1, 1)
-
 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
                                 struct sbridge_dev *sbridge_dev)
 {
index 2a0e4f45d5b204b1f7d84dff48b952c8c0b9e34c..972c813c375b086e2b5950d2be1780dccd78dbfc 100644 (file)
@@ -2,7 +2,8 @@
 # Makefile for external connector class (extcon) devices
 #
 
-obj-$(CONFIG_EXTCON)           += extcon.o
+obj-$(CONFIG_EXTCON)           += extcon-core.o
+extcon-core-objs               += extcon.o devres.o
 obj-$(CONFIG_EXTCON_ADC_JACK)  += extcon-adc-jack.o
 obj-$(CONFIG_EXTCON_ARIZONA)   += extcon-arizona.o
 obj-$(CONFIG_EXTCON_AXP288)    += extcon-axp288.o
diff --git a/drivers/extcon/devres.c b/drivers/extcon/devres.c
new file mode 100644 (file)
index 0000000..e686acd
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ *  drivers/extcon/devres.c - EXTCON device's resource management
+ *
+ * Copyright (C) 2016 Samsung Electronics
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/extcon.h>
+
+static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
+{
+       struct extcon_dev **r = res;
+
+       if (WARN_ON(!r || !*r))
+               return 0;
+
+       return *r == data;
+}
+
+static void devm_extcon_dev_release(struct device *dev, void *res)
+{
+       extcon_dev_free(*(struct extcon_dev **)res);
+}
+
+
+static void devm_extcon_dev_unreg(struct device *dev, void *res)
+{
+       extcon_dev_unregister(*(struct extcon_dev **)res);
+}
+
+struct extcon_dev_notifier_devres {
+       struct extcon_dev *edev;
+       unsigned int id;
+       struct notifier_block *nb;
+};
+
+static void devm_extcon_dev_notifier_unreg(struct device *dev, void *res)
+{
+       struct extcon_dev_notifier_devres *this = res;
+
+       extcon_unregister_notifier(this->edev, this->id, this->nb);
+}
+
+/**
+ * devm_extcon_dev_allocate - Allocate managed extcon device
+ * @dev:               device owning the extcon device being created
+ * @supported_cable:   Array of supported extcon ending with EXTCON_NONE.
+ *                     If supported_cable is NULL, cable name related APIs
+ *                     are disabled.
+ *
+ * This function manages automatically the memory of extcon device using device
+ * resource management and simplify the control of freeing the memory of extcon
+ * device.
+ *
+ * Returns the pointer memory of allocated extcon_dev if success
+ * or ERR_PTR(err) if fail
+ */
+struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
+                                       const unsigned int *supported_cable)
+{
+       struct extcon_dev **ptr, *edev;
+
+       ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return ERR_PTR(-ENOMEM);
+
+       edev = extcon_dev_allocate(supported_cable);
+       if (IS_ERR(edev)) {
+               devres_free(ptr);
+               return edev;
+       }
+
+       edev->dev.parent = dev;
+
+       *ptr = edev;
+       devres_add(dev, ptr);
+
+       return edev;
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
+
+/**
+ * devm_extcon_dev_free() - Resource-managed extcon_dev_unregister()
+ * @dev:       device the extcon belongs to
+ * @edev:      the extcon device to unregister
+ *
+ * Free the memory that is allocated with devm_extcon_dev_allocate()
+ * function.
+ */
+void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
+{
+       WARN_ON(devres_release(dev, devm_extcon_dev_release,
+                              devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
+
+/**
+ * devm_extcon_dev_register() - Resource-managed extcon_dev_register()
+ * @dev:       device to allocate extcon device
+ * @edev:      the new extcon device to register
+ *
+ * Managed extcon_dev_register() function. If extcon device is attached with
+ * this function, that extcon device is automatically unregistered on driver
+ * detach. Internally this function calls extcon_dev_register() function.
+ * To get more information, refer that function.
+ *
+ * If extcon device is registered with this function and the device needs to be
+ * unregistered separately, devm_extcon_dev_unregister() should be used.
+ *
+ * Returns 0 if success or negaive error number if failure.
+ */
+int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
+{
+       struct extcon_dev **ptr;
+       int ret;
+
+       ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       ret = extcon_dev_register(edev);
+       if (ret) {
+               devres_free(ptr);
+               return ret;
+       }
+
+       *ptr = edev;
+       devres_add(dev, ptr);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
+
+/**
+ * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
+ * @dev:       device the extcon belongs to
+ * @edev:      the extcon device to unregister
+ *
+ * Unregister extcon device that is registered with devm_extcon_dev_register()
+ * function.
+ */
+void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
+{
+       WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
+                              devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
+
+/**
+ * devm_extcon_register_notifier() - Resource-managed extcon_register_notifier()
+ * @dev:       device to allocate extcon device
+ * @edev:      the extcon device that has the external connecotr.
+ * @id:                the unique id of each external connector in extcon enumeration.
+ * @nb:                a notifier block to be registered.
+ *
+ * This function manages automatically the notifier of extcon device using
+ * device resource management and simplify the control of unregistering
+ * the notifier of extcon device.
+ *
+ * Note that the second parameter given to the callback of nb (val) is
+ * "old_state", not the current state. The current state can be retrieved
+ * by looking at the third pameter (edev pointer)'s state value.
+ *
+ * Returns 0 if success or negaive error number if failure.
+ */
+int devm_extcon_register_notifier(struct device *dev, struct extcon_dev *edev,
+                               unsigned int id, struct notifier_block *nb)
+{
+       struct extcon_dev_notifier_devres *ptr;
+       int ret;
+
+       ptr = devres_alloc(devm_extcon_dev_notifier_unreg, sizeof(*ptr),
+                               GFP_KERNEL);
+       if (!ptr)
+               return -ENOMEM;
+
+       ret = extcon_register_notifier(edev, id, nb);
+       if (ret) {
+               devres_free(ptr);
+               return ret;
+       }
+
+       ptr->edev = edev;
+       ptr->id = id;
+       ptr->nb = nb;
+       devres_add(dev, ptr);
+
+       return 0;
+}
+EXPORT_SYMBOL(devm_extcon_register_notifier);
+
+/**
+ * devm_extcon_unregister_notifier()
+                       - Resource-managed extcon_unregister_notifier()
+ * @dev:       device to allocate extcon device
+ * @edev:      the extcon device that has the external connecotr.
+ * @id:                the unique id of each external connector in extcon enumeration.
+ * @nb:                a notifier block to be registered.
+ */
+void devm_extcon_unregister_notifier(struct device *dev,
+                               struct extcon_dev *edev, unsigned int id,
+                               struct notifier_block *nb)
+{
+       WARN_ON(devres_release(dev, devm_extcon_dev_notifier_unreg,
+                              devm_extcon_dev_match, edev));
+}
+EXPORT_SYMBOL(devm_extcon_unregister_notifier);
index 7fc0ae1912f855c586ef0a9b064ea3bba68ff3b6..44e48aa78a84bbdaf7f925bae5c80f8ffe634680 100644 (file)
@@ -38,6 +38,7 @@
  * @chan:              iio channel being queried.
  */
 struct adc_jack_data {
+       struct device *dev;
        struct extcon_dev *edev;
 
        const unsigned int **cable_names;
@@ -49,6 +50,7 @@ struct adc_jack_data {
        struct delayed_work handler;
 
        struct iio_channel *chan;
+       bool wakeup_source;
 };
 
 static void adc_jack_handler(struct work_struct *work)
@@ -105,6 +107,7 @@ static int adc_jack_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       data->dev = &pdev->dev;
        data->edev = devm_extcon_dev_allocate(&pdev->dev, pdata->cable_names);
        if (IS_ERR(data->edev)) {
                dev_err(&pdev->dev, "failed to allocate extcon device\n");
@@ -128,6 +131,7 @@ static int adc_jack_probe(struct platform_device *pdev)
                return PTR_ERR(data->chan);
 
        data->handling_delay = msecs_to_jiffies(pdata->handling_delay_ms);
+       data->wakeup_source = pdata->wakeup_source;
 
        INIT_DEFERRABLE_WORK(&data->handler, adc_jack_handler);
 
@@ -151,6 +155,9 @@ static int adc_jack_probe(struct platform_device *pdev)
                return err;
        }
 
+       if (data->wakeup_source)
+               device_init_wakeup(&pdev->dev, 1);
+
        return 0;
 }
 
@@ -165,11 +172,38 @@ static int adc_jack_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int adc_jack_suspend(struct device *dev)
+{
+       struct adc_jack_data *data = dev_get_drvdata(dev);
+
+       cancel_delayed_work_sync(&data->handler);
+       if (device_may_wakeup(data->dev))
+               enable_irq_wake(data->irq);
+
+       return 0;
+}
+
+static int adc_jack_resume(struct device *dev)
+{
+       struct adc_jack_data *data = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(data->dev))
+               disable_irq_wake(data->irq);
+
+       return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(adc_jack_pm_ops,
+               adc_jack_suspend, adc_jack_resume);
+
 static struct platform_driver adc_jack_driver = {
        .probe          = adc_jack_probe,
        .remove         = adc_jack_remove,
        .driver         = {
                .name   = "adc-jack",
+               .pm = &adc_jack_pm_ops,
        },
 };
 
index 2b2fecffb1ad647ba1b0184da3872fef3a287c23..2512660dc4b9508c5dd26a2a63177a7681f52d56 100644 (file)
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
+#include <linux/acpi.h>
 
 #define USB_GPIO_DEBOUNCE_MS   20      /* ms */
 
@@ -91,7 +93,7 @@ static int usb_extcon_probe(struct platform_device *pdev)
        struct usb_extcon_info *info;
        int ret;
 
-       if (!np)
+       if (!np && !ACPI_HANDLE(dev))
                return -EINVAL;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -141,7 +143,8 @@ static int usb_extcon_probe(struct platform_device *pdev)
        }
 
        platform_set_drvdata(pdev, info);
-       device_init_wakeup(dev, 1);
+       device_init_wakeup(dev, true);
+       dev_pm_set_wake_irq(dev, info->id_irq);
 
        /* Perform initial detection */
        usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -155,6 +158,9 @@ static int usb_extcon_remove(struct platform_device *pdev)
 
        cancel_delayed_work_sync(&info->wq_detcable);
 
+       dev_pm_clear_wake_irq(&pdev->dev);
+       device_init_wakeup(&pdev->dev, false);
+
        return 0;
 }
 
@@ -164,12 +170,6 @@ static int usb_extcon_suspend(struct device *dev)
        struct usb_extcon_info *info = dev_get_drvdata(dev);
        int ret = 0;
 
-       if (device_may_wakeup(dev)) {
-               ret = enable_irq_wake(info->id_irq);
-               if (ret)
-                       return ret;
-       }
-
        /*
         * We don't want to process any IRQs after this point
         * as GPIOs used behind I2C subsystem might not be
@@ -185,13 +185,10 @@ static int usb_extcon_resume(struct device *dev)
        struct usb_extcon_info *info = dev_get_drvdata(dev);
        int ret = 0;
 
-       if (device_may_wakeup(dev)) {
-               ret = disable_irq_wake(info->id_irq);
-               if (ret)
-                       return ret;
-       }
-
        enable_irq(info->id_irq);
+       if (!device_may_wakeup(dev))
+               queue_delayed_work(system_power_efficient_wq,
+                                  &info->wq_detcable, 0);
 
        return ret;
 }
@@ -206,6 +203,12 @@ static const struct of_device_id usb_extcon_dt_match[] = {
 };
 MODULE_DEVICE_TABLE(of, usb_extcon_dt_match);
 
+static const struct platform_device_id usb_extcon_platform_ids[] = {
+       { .name = "extcon-usb-gpio", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, usb_extcon_platform_ids);
+
 static struct platform_driver usb_extcon_driver = {
        .probe          = usb_extcon_probe,
        .remove         = usb_extcon_remove,
@@ -214,6 +217,7 @@ static struct platform_driver usb_extcon_driver = {
                .pm     = &usb_extcon_pm_ops,
                .of_match_table = usb_extcon_dt_match,
        },
+       .id_table = usb_extcon_platform_ids,
 };
 
 module_platform_driver(usb_extcon_driver);
index 21a123cadf78278947e551aaaa1772a60728969a..8682efc0f57b7557761feef72433ecfad08d237b 100644 (file)
@@ -77,6 +77,26 @@ static const char *extcon_name[] =  {
        NULL,
 };
 
+/**
+ * struct extcon_cable - An internal data for each cable of extcon device.
+ * @edev:              The extcon device
+ * @cable_index:       Index of this cable in the edev
+ * @attr_g:            Attribute group for the cable
+ * @attr_name:         "name" sysfs entry
+ * @attr_state:                "state" sysfs entry
+ * @attrs:             Array pointing to attr_name and attr_state for attr_g
+ */
+struct extcon_cable {
+       struct extcon_dev *edev;
+       int cable_index;
+
+       struct attribute_group attr_g;
+       struct device_attribute attr_name;
+       struct device_attribute attr_state;
+
+       struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
+};
+
 static struct class *extcon_class;
 #if defined(CONFIG_ANDROID)
 static struct class_compat *switch_class;
@@ -127,38 +147,6 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
        return -EINVAL;
 }
 
-static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
-{
-       int id = -EINVAL;
-       int i = 0;
-
-       /* Find the id of extcon cable */
-       while (extcon_name[i]) {
-               if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
-                       id = i;
-                       break;
-               }
-               i++;
-       }
-
-       return id;
-}
-
-static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
-{
-       int id;
-
-       if (edev->max_supported == 0)
-               return -EINVAL;
-
-       /* Find the the number of extcon cable */
-       id = find_cable_id_by_name(edev, name);
-       if (id < 0)
-               return id;
-
-       return find_cable_index_by_id(edev, id);
-}
-
 static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
 {
        if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
@@ -373,25 +361,6 @@ int extcon_get_cable_state_(struct extcon_dev *edev, const unsigned int id)
 }
 EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
 
-/**
- * extcon_get_cable_state() - Get the status of a specific cable.
- * @edev:      the extcon device that has the cable.
- * @cable_name:        cable name.
- *
- * Note that this is slower than extcon_get_cable_state_.
- */
-int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
-{
-       int id;
-
-       id = find_cable_id_by_name(edev, cable_name);
-       if (id < 0)
-               return id;
-
-       return extcon_get_cable_state_(edev, id);
-}
-EXPORT_SYMBOL_GPL(extcon_get_cable_state);
-
 /**
  * extcon_set_cable_state_() - Set the status of a specific cable.
  * @edev:              the extcon device that has the cable.
@@ -421,28 +390,6 @@ int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
 }
 EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
 
-/**
- * extcon_set_cable_state() - Set the status of a specific cable.
- * @edev:              the extcon device that has the cable.
- * @cable_name:                cable name.
- * @cable_state:       the new cable status. The default semantics is
- *                     true: attached / false: detached.
- *
- * Note that this is slower than extcon_set_cable_state_.
- */
-int extcon_set_cable_state(struct extcon_dev *edev,
-                       const char *cable_name, bool cable_state)
-{
-       int id;
-
-       id = find_cable_id_by_name(edev, cable_name);
-       if (id < 0)
-               return id;
-
-       return extcon_set_cable_state_(edev, id, cable_state);
-}
-EXPORT_SYMBOL_GPL(extcon_set_cable_state);
-
 /**
  * extcon_get_extcon_dev() - Get the extcon device instance from the name
  * @extcon_name:       The extcon name provided with extcon_dev_register()
@@ -466,105 +413,6 @@ out:
 }
 EXPORT_SYMBOL_GPL(extcon_get_extcon_dev);
 
-/**
- * extcon_register_interest() - Register a notifier for a state change of a
- *                             specific cable, not an entier set of cables of a
- *                             extcon device.
- * @obj:               an empty extcon_specific_cable_nb object to be returned.
- * @extcon_name:       the name of extcon device.
- *                     if NULL, extcon_register_interest will register
- *                     every cable with the target cable_name given.
- * @cable_name:                the target cable name.
- * @nb:                        the notifier block to get notified.
- *
- * Provide an empty extcon_specific_cable_nb. extcon_register_interest() sets
- * the struct for you.
- *
- * extcon_register_interest is a helper function for those who want to get
- * notification for a single specific cable's status change. If a user wants
- * to get notification for any changes of all cables of a extcon device,
- * he/she should use the general extcon_register_notifier().
- *
- * Note that the second parameter given to the callback of nb (val) is
- * "old_state", not the current state. The current state can be retrieved
- * by looking at the third pameter (edev pointer)'s state value.
- */
-int extcon_register_interest(struct extcon_specific_cable_nb *obj,
-                            const char *extcon_name, const char *cable_name,
-                            struct notifier_block *nb)
-{
-       unsigned long flags;
-       int ret;
-
-       if (!obj || !cable_name || !nb)
-               return -EINVAL;
-
-       if (extcon_name) {
-               obj->edev = extcon_get_extcon_dev(extcon_name);
-               if (!obj->edev)
-                       return -ENODEV;
-
-               obj->cable_index = find_cable_index_by_name(obj->edev,
-                                                       cable_name);
-               if (obj->cable_index < 0)
-                       return obj->cable_index;
-
-               obj->user_nb = nb;
-
-               spin_lock_irqsave(&obj->edev->lock, flags);
-               ret = raw_notifier_chain_register(
-                                       &obj->edev->nh[obj->cable_index],
-                                       obj->user_nb);
-               spin_unlock_irqrestore(&obj->edev->lock, flags);
-       } else {
-               struct class_dev_iter iter;
-               struct extcon_dev *extd;
-               struct device *dev;
-
-               if (!extcon_class)
-                       return -ENODEV;
-               class_dev_iter_init(&iter, extcon_class, NULL, NULL);
-               while ((dev = class_dev_iter_next(&iter))) {
-                       extd = dev_get_drvdata(dev);
-
-                       if (find_cable_index_by_name(extd, cable_name) < 0)
-                               continue;
-
-                       class_dev_iter_exit(&iter);
-                       return extcon_register_interest(obj, extd->name,
-                                               cable_name, nb);
-               }
-
-               ret = -ENODEV;
-       }
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(extcon_register_interest);
-
-/**
- * extcon_unregister_interest() - Unregister the notifier registered by
- *                               extcon_register_interest().
- * @obj:       the extcon_specific_cable_nb object returned by
- *             extcon_register_interest().
- */
-int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
-{
-       unsigned long flags;
-       int ret;
-
-       if (!obj)
-               return -EINVAL;
-
-       spin_lock_irqsave(&obj->edev->lock, flags);
-       ret = raw_notifier_chain_unregister(
-                       &obj->edev->nh[obj->cable_index], obj->user_nb);
-       spin_unlock_irqrestore(&obj->edev->lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(extcon_unregister_interest);
-
 /**
  * extcon_register_notifier() - Register a notifiee to get notified by
  *                             any attach status changes from the extcon.
@@ -582,14 +430,35 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
        unsigned long flags;
        int ret, idx;
 
-       if (!edev || !nb)
+       if (!nb)
                return -EINVAL;
 
-       idx = find_cable_index_by_id(edev, id);
+       if (edev) {
+               idx = find_cable_index_by_id(edev, id);
+               if (idx < 0)
+                       return idx;
 
-       spin_lock_irqsave(&edev->lock, flags);
-       ret = raw_notifier_chain_register(&edev->nh[idx], nb);
-       spin_unlock_irqrestore(&edev->lock, flags);
+               spin_lock_irqsave(&edev->lock, flags);
+               ret = raw_notifier_chain_register(&edev->nh[idx], nb);
+               spin_unlock_irqrestore(&edev->lock, flags);
+       } else {
+               struct extcon_dev *extd;
+
+               mutex_lock(&extcon_dev_list_lock);
+               list_for_each_entry(extd, &extcon_dev_list, entry) {
+                       idx = find_cable_index_by_id(extd, id);
+                       if (idx >= 0)
+                               break;
+               }
+               mutex_unlock(&extcon_dev_list_lock);
+
+               if (idx >= 0) {
+                       edev = extd;
+                       return extcon_register_notifier(extd, id, nb);
+               } else {
+                       ret = -ENODEV;
+               }
+       }
 
        return ret;
 }
@@ -611,6 +480,8 @@ int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
                return -EINVAL;
 
        idx = find_cable_index_by_id(edev, id);
+       if (idx < 0)
+               return idx;
 
        spin_lock_irqsave(&edev->lock, flags);
        ret = raw_notifier_chain_unregister(&edev->nh[idx], nb);
@@ -693,66 +564,6 @@ void extcon_dev_free(struct extcon_dev *edev)
 }
 EXPORT_SYMBOL_GPL(extcon_dev_free);
 
-static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
-{
-       struct extcon_dev **r = res;
-
-       if (WARN_ON(!r || !*r))
-               return 0;
-
-       return *r == data;
-}
-
-static void devm_extcon_dev_release(struct device *dev, void *res)
-{
-       extcon_dev_free(*(struct extcon_dev **)res);
-}
-
-/**
- * devm_extcon_dev_allocate - Allocate managed extcon device
- * @dev:               device owning the extcon device being created
- * @supported_cable:   Array of supported extcon ending with EXTCON_NONE.
- *                     If supported_cable is NULL, cable name related APIs
- *                     are disabled.
- *
- * This function manages automatically the memory of extcon device using device
- * resource management and simplify the control of freeing the memory of extcon
- * device.
- *
- * Returns the pointer memory of allocated extcon_dev if success
- * or ERR_PTR(err) if fail
- */
-struct extcon_dev *devm_extcon_dev_allocate(struct device *dev,
-                                       const unsigned int *supported_cable)
-{
-       struct extcon_dev **ptr, *edev;
-
-       ptr = devres_alloc(devm_extcon_dev_release, sizeof(*ptr), GFP_KERNEL);
-       if (!ptr)
-               return ERR_PTR(-ENOMEM);
-
-       edev = extcon_dev_allocate(supported_cable);
-       if (IS_ERR(edev)) {
-               devres_free(ptr);
-               return edev;
-       }
-
-       edev->dev.parent = dev;
-
-       *ptr = edev;
-       devres_add(dev, ptr);
-
-       return edev;
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_allocate);
-
-void devm_extcon_dev_free(struct device *dev, struct extcon_dev *edev)
-{
-       WARN_ON(devres_release(dev, devm_extcon_dev_release,
-                              devm_extcon_dev_match, edev));
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_free);
-
 /**
  * extcon_dev_register() - Register a new extcon device
  * @edev       : the new extcon device (should be allocated before calling)
@@ -1018,63 +829,6 @@ void extcon_dev_unregister(struct extcon_dev *edev)
 }
 EXPORT_SYMBOL_GPL(extcon_dev_unregister);
 
-static void devm_extcon_dev_unreg(struct device *dev, void *res)
-{
-       extcon_dev_unregister(*(struct extcon_dev **)res);
-}
-
-/**
- * devm_extcon_dev_register() - Resource-managed extcon_dev_register()
- * @dev:       device to allocate extcon device
- * @edev:      the new extcon device to register
- *
- * Managed extcon_dev_register() function. If extcon device is attached with
- * this function, that extcon device is automatically unregistered on driver
- * detach. Internally this function calls extcon_dev_register() function.
- * To get more information, refer that function.
- *
- * If extcon device is registered with this function and the device needs to be
- * unregistered separately, devm_extcon_dev_unregister() should be used.
- *
- * Returns 0 if success or negaive error number if failure.
- */
-int devm_extcon_dev_register(struct device *dev, struct extcon_dev *edev)
-{
-       struct extcon_dev **ptr;
-       int ret;
-
-       ptr = devres_alloc(devm_extcon_dev_unreg, sizeof(*ptr), GFP_KERNEL);
-       if (!ptr)
-               return -ENOMEM;
-
-       ret = extcon_dev_register(edev);
-       if (ret) {
-               devres_free(ptr);
-               return ret;
-       }
-
-       *ptr = edev;
-       devres_add(dev, ptr);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_register);
-
-/**
- * devm_extcon_dev_unregister() - Resource-managed extcon_dev_unregister()
- * @dev:       device the extcon belongs to
- * @edev:      the extcon device to unregister
- *
- * Unregister extcon device that is registered with devm_extcon_dev_register()
- * function.
- */
-void devm_extcon_dev_unregister(struct device *dev, struct extcon_dev *edev)
-{
-       WARN_ON(devres_release(dev, devm_extcon_dev_unreg,
-                              devm_extcon_dev_match, edev));
-}
-EXPORT_SYMBOL_GPL(devm_extcon_dev_unregister);
-
 #ifdef CONFIG_OF
 /*
  * extcon_get_edev_by_phandle - Get the extcon device from devicetree
@@ -1107,10 +861,12 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
        list_for_each_entry(edev, &extcon_dev_list, entry) {
                if (edev->dev.parent && edev->dev.parent->of_node == node) {
                        mutex_unlock(&extcon_dev_list_lock);
+                       of_node_put(node);
                        return edev;
                }
        }
        mutex_unlock(&extcon_dev_list_lock);
+       of_node_put(node);
 
        return ERR_PTR(-EPROBE_DEFER);
 }
index 8dd0c7085e59799dce9064361e6e1ec7df4f0ccd..503bbe2a9d494fba15297eb7922d1a1e1f269d5f 100644 (file)
@@ -37,13 +37,13 @@ static int efibc_set_variable(const char *name, const char *value)
        size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
 
        if (size > sizeof(entry->var.Data)) {
-               pr_err("value is too large");
+               pr_err("value is too large (%zu bytes) for '%s' EFI variable\n", size, name);
                return -EINVAL;
        }
 
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry) {
-               pr_err("failed to allocate efivar entry");
+               pr_err("failed to allocate efivar entry for '%s' EFI variable\n", name);
                return -ENOMEM;
        }
 
index 23bef6bb73ee58db932e7fd34100e4d850e73a4a..41958774cde301e5e1d1d5f3eef7be6e1eea0b59 100644 (file)
 #include <linux/stringify.h>
 #include <asm/efi.h>
 
-static void efi_call_virt_check_flags(unsigned long flags, const char *call)
+/*
+ * Wrap around the new efi_call_virt_generic() macros so that the
+ * code doesn't get too cluttered:
+ */
+#define efi_call_virt(f, args...)   \
+       efi_call_virt_pointer(efi.systab->runtime, f, args)
+#define __efi_call_virt(f, args...) \
+       __efi_call_virt_pointer(efi.systab->runtime, f, args)
+
+void efi_call_virt_check_flags(unsigned long flags, const char *call)
 {
        unsigned long cur_flags, mismatch;
 
@@ -38,48 +47,6 @@ static void efi_call_virt_check_flags(unsigned long flags, const char *call)
        local_irq_restore(flags);
 }
 
-/*
- * Arch code can implement the following three template macros, avoiding
- * reptition for the void/non-void return cases of {__,}efi_call_virt:
- *
- *  * arch_efi_call_virt_setup
- *
- *    Sets up the environment for the call (e.g. switching page tables,
- *    allowing kernel-mode use of floating point, if required).
- *
- *  * arch_efi_call_virt
- *
- *    Performs the call. The last expression in the macro must be the call
- *    itself, allowing the logic to be shared by the void and non-void
- *    cases.
- *
- *  * arch_efi_call_virt_teardown
- *
- *    Restores the usual kernel environment once the call has returned.
- */
-
-#define efi_call_virt(f, args...)                                      \
-({                                                                     \
-       efi_status_t __s;                                               \
-       unsigned long flags;                                            \
-       arch_efi_call_virt_setup();                                     \
-       local_save_flags(flags);                                        \
-       __s = arch_efi_call_virt(f, args);                              \
-       efi_call_virt_check_flags(flags, __stringify(f));               \
-       arch_efi_call_virt_teardown();                                  \
-       __s;                                                            \
-})
-
-#define __efi_call_virt(f, args...)                                    \
-({                                                                     \
-       unsigned long flags;                                            \
-       arch_efi_call_virt_setup();                                     \
-       local_save_flags(flags);                                        \
-       arch_efi_call_virt(f, args);                                    \
-       efi_call_virt_check_flags(flags, __stringify(f));               \
-       arch_efi_call_virt_teardown();                                  \
-})
-
 /*
  * According to section 7.1 of the UEFI spec, Runtime Services are not fully
  * reentrant, and there are particular combinations of calls that need to be
index cebcb405812ec0c8a98f771b94f8a4c5caddabc5..d7860614f87f532caeb21145c78352d38161b2a3 100644 (file)
@@ -49,7 +49,7 @@ config GPIO_DEVRES
 
 config OF_GPIO
        def_bool y
-       depends on OF || COMPILE_TEST
+       depends on OF
 
 config GPIO_ACPI
        def_bool y
@@ -402,9 +402,12 @@ config GPIO_TB10X
        select OF_GPIO
 
 config GPIO_TEGRA
-       bool
-       default y
+       bool "NVIDIA Tegra GPIO support"
+       default ARCH_TEGRA
        depends on ARCH_TEGRA || COMPILE_TEST
+       depends on OF
+       help
+         Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
 
 config GPIO_TS4800
        tristate "TS-4800 DIO blocks and compatibles"
index e85e7539cf5d23d0eae1d4a1433b236253dd10cc..eb43ae4835c15d8a65ac8f4fde2de07c7b6e19ee 100644 (file)
@@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
        return gpio % 8;
 }
 
-static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
+static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
 {
-       struct sch_gpio *sch = gpiochip_get_data(gc);
        unsigned short offset, bit;
        u8 reg_val;
 
@@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
        return reg_val;
 }
 
-static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
+static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
                             int val)
 {
-       struct sch_gpio *sch = gpiochip_get_data(gc);
        unsigned short offset, bit;
        u8 reg_val;
 
@@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
        struct sch_gpio *sch = gpiochip_get_data(gc);
 
        spin_lock(&sch->lock);
-       sch_gpio_reg_set(gc, gpio_num, GIO, 1);
+       sch_gpio_reg_set(sch, gpio_num, GIO, 1);
        spin_unlock(&sch->lock);
        return 0;
 }
 
 static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
 {
-       return sch_gpio_reg_get(gc, gpio_num, GLV);
+       struct sch_gpio *sch = gpiochip_get_data(gc);
+       return sch_gpio_reg_get(sch, gpio_num, GLV);
 }
 
 static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
@@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
        struct sch_gpio *sch = gpiochip_get_data(gc);
 
        spin_lock(&sch->lock);
-       sch_gpio_reg_set(gc, gpio_num, GLV, val);
+       sch_gpio_reg_set(sch, gpio_num, GLV, val);
        spin_unlock(&sch->lock);
 }
 
@@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
        struct sch_gpio *sch = gpiochip_get_data(gc);
 
        spin_lock(&sch->lock);
-       sch_gpio_reg_set(gc, gpio_num, GIO, 0);
+       sch_gpio_reg_set(sch, gpio_num, GIO, 0);
        spin_unlock(&sch->lock);
 
        /*
@@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
                 * GPIO7 is configured by the CMC as SLPIOVR
                 * Enable GPIO[9:8] core powered gpios explicitly
                 */
-               sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
-               sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
+               sch_gpio_reg_set(sch, 8, GEN, 1);
+               sch_gpio_reg_set(sch, 9, GEN, 1);
                /*
                 * SUS_GPIO[2:0] enabled by default
                 * Enable SUS_GPIO3 resume powered gpio explicitly
                 */
-               sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
+               sch_gpio_reg_set(sch, 13, GEN, 1);
                break;
 
        case PCI_DEVICE_ID_INTEL_ITC_LPC:
index 3a5c7011ad3b3e832d7cce6e55ed62562bba94be..8b830996fe0212d3ae0153ae5b708a84fa53976e 100644 (file)
@@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
        if (!desc && gpio_is_valid(gpio))
                return -EPROBE_DEFER;
 
+       err = gpiod_request(desc, label);
+       if (err)
+               return err;
+
        if (flags & GPIOF_OPEN_DRAIN)
                set_bit(FLAG_OPEN_DRAIN, &desc->flags);
 
@@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
        if (flags & GPIOF_ACTIVE_LOW)
                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
-       err = gpiod_request(desc, label);
-       if (err)
-               return err;
-
        if (flags & GPIOF_DIR_IN)
                err = gpiod_direction_input(desc);
        else
index 570771ed19e6f1f326575fa20cdc621ea2588432..be74bd370f1fc5443586d57076e9c04a80d74e1f 100644 (file)
@@ -1352,14 +1352,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
                spin_lock_irqsave(&gpio_lock, flags);
        }
 done:
-       if (status < 0) {
-               /* Clear flags that might have been set by the caller before
-                * requesting the GPIO.
-                */
-               clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
-               clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
-               clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
-       }
        spin_unlock_irqrestore(&gpio_lock, flags);
        return status;
 }
@@ -2587,28 +2579,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(gpiod_get_optional);
 
-/**
- * gpiod_parse_flags - helper function to parse GPIO lookup flags
- * @desc:      gpio to be setup
- * @lflags:    gpio_lookup_flags - returned from of_find_gpio() or
- *             of_get_gpio_hog()
- *
- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
- */
-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
-{
-       if (lflags & GPIO_ACTIVE_LOW)
-               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-       if (lflags & GPIO_OPEN_DRAIN)
-               set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-       if (lflags & GPIO_OPEN_SOURCE)
-               set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-}
 
 /**
  * gpiod_configure_flags - helper function to configure a given GPIO
  * @desc:      gpio whose value will be assigned
  * @con_id:    function within the GPIO consumer
+ * @lflags:    gpio_lookup_flags - returned from of_find_gpio() or
+ *             of_get_gpio_hog()
  * @dflags:    gpiod_flags - optional GPIO initialization flags
  *
  * Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -2616,10 +2593,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
  * occurred while trying to acquire the GPIO.
  */
 static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
-                                enum gpiod_flags dflags)
+               unsigned long lflags, enum gpiod_flags dflags)
 {
        int status;
 
+       if (lflags & GPIO_ACTIVE_LOW)
+               set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+       if (lflags & GPIO_OPEN_DRAIN)
+               set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+       if (lflags & GPIO_OPEN_SOURCE)
+               set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
        /* No particular flag request, return here... */
        if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
                pr_debug("no flags found for %s\n", con_id);
@@ -2686,13 +2670,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
                return desc;
        }
 
-       gpiod_parse_flags(desc, lookupflags);
-
        status = gpiod_request(desc, con_id);
        if (status < 0)
                return ERR_PTR(status);
 
-       status = gpiod_configure_flags(desc, con_id, flags);
+       status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
        if (status < 0) {
                dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
                gpiod_put(desc);
@@ -2748,6 +2730,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
        if (IS_ERR(desc))
                return desc;
 
+       ret = gpiod_request(desc, NULL);
+       if (ret)
+               return ERR_PTR(ret);
+
        if (active_low)
                set_bit(FLAG_ACTIVE_LOW, &desc->flags);
 
@@ -2758,10 +2744,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
                        set_bit(FLAG_OPEN_SOURCE, &desc->flags);
        }
 
-       ret = gpiod_request(desc, NULL);
-       if (ret)
-               return ERR_PTR(ret);
-
        return desc;
 }
 EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -2814,8 +2796,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
        chip = gpiod_to_chip(desc);
        hwnum = gpio_chip_hwgpio(desc);
 
-       gpiod_parse_flags(desc, lflags);
-
        local_desc = gpiochip_request_own_desc(chip, hwnum, name);
        if (IS_ERR(local_desc)) {
                status = PTR_ERR(local_desc);
@@ -2824,7 +2804,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
                return status;
        }
 
-       status = gpiod_configure_flags(desc, name, dflags);
+       status = gpiod_configure_flags(desc, name, lflags, dflags);
        if (status < 0) {
                pr_err("setup of hog GPIO %s (chip %s, offset %d) failed, %d\n",
                       name, chip->label, hwnum, status);
index 13cdb01e9b45022aebf6d052523d718d52ceaa91..bc56c8a181e628b575861233bd705dd986f9f061 100644 (file)
@@ -156,3 +156,18 @@ u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap)
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
 }
 
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev, u8 slave_addr, u8 line_number, u8 offset, u8 data)
+{
+       PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+       int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
+
+       args.ucRegIndex = offset;
+       args.lpI2CDataOut = data;
+       args.ucFlag = 1;
+       args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
+       args.ucTransBytes = 1;
+       args.ucSlaveAddr = slave_addr;
+       args.ucLineNumber = line_number;
+
+       amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+}
index d6128d9de56e4b9db096de8e07d30d51e2e4a4f2..251aaf41f65d5a277104c3b9fda29fc71bbca680 100644 (file)
@@ -27,5 +27,7 @@
 int amdgpu_atombios_i2c_xfer(struct i2c_adapter *i2c_adap,
                      struct i2c_msg *msgs, int num);
 u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap);
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev,
+               u8 slave_addr, u8 line_number, u8 offset, u8 data);
 
 #endif
index b2ebd4fef6cfc12b7bbd746df5f60492ad48d9ba..c2ef94511f7020da5e1ff4dcfa8b8ed854246637 100644 (file)
@@ -28,6 +28,7 @@
 #include "vid.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_atombios.h"
+#include "atombios_i2c.h"
 #include "clearstate_vi.h"
 
 #include "gmc/gmc_8_2_d.h"
@@ -284,6 +285,7 @@ static const u32 golden_settings_polaris11_a11[] =
        mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f3,
        mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
        mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003210,
+       mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 };
 
 static const u32 polaris11_golden_common_all[] =
@@ -314,6 +316,7 @@ static const u32 golden_settings_polaris10_a11[] =
        mmTCC_CTRL, 0x00100000, 0xf31fff7f,
        mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7,
        mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000,
+       mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
 };
 
 static const u32 polaris10_golden_common_all[] =
@@ -696,6 +699,10 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
                                                 polaris10_golden_common_all,
                                                 (const u32)ARRAY_SIZE(polaris10_golden_common_all));
                WREG32_SMC(ixCG_ACLK_CNTL, 0x0000001C);
+               if (adev->pdev->revision == 0xc7) {
+                       amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1E, 0xDD);
+                       amdgpu_atombios_i2c_channel_trans(adev, 0x10, 0x96, 0x1F, 0xD0);
+               }
                break;
        case CHIP_CARRIZO:
                amdgpu_program_register_sequence(adev,
index ec2a7ada346a74138401849aea3a1e225435ef16..91e25f942d909b0ac6c07d85632514cef6a28023 100644 (file)
@@ -98,7 +98,6 @@
 #define PCIE_BUS_CLK                10000
 #define TCLK                        (PCIE_BUS_CLK / 10)
 
-#define CEILING_UCHAR(double) ((double-(uint8_t)(double)) > 0 ? (uint8_t)(double+1) : (uint8_t)(double))
 
 static const uint16_t polaris10_clock_stretcher_lookup_table[2][4] =
 { {600, 1050, 3, 0}, {600, 1050, 6, 1} };
@@ -733,7 +732,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                        table->Smio[level] |=
                                data->mvdd_voltage_table.entries[level].smio_low;
                }
-               table->SmioMask2 = data->vddci_voltage_table.mask_low;
+               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
 
                table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count);
        }
@@ -1807,27 +1806,25 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 
        ro = efuse * (max -min)/255 + min;
 
-       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset
-        * there is a little difference in calculating
-        * volt_with_cks with windows */
+       /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
        for (i = 0; i < sclk_table->count; i++) {
                data->smc_state_table.Sclk_CKS_masterEn0_7 |=
                                sclk_table->entries[i].cks_enable << i;
                if (hwmgr->chip_id == CHIP_POLARIS10) {
-                       volt_without_cks = (uint32_t)((2753594000 + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
+                       volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * 136418 -(ro - 70) * 1000000) / \
                                                (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000));
-                       volt_with_cks = (uint32_t)((279720200 + sclk_table->entries[i].clk * 3232 - (ro - 65) * 100000000) / \
-                                       (252248000 - sclk_table->entries[i].clk/100 * 115764));
+                       volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * 3232 - (ro - 65) * 1000000) / \
+                                       (2522480 - sclk_table->entries[i].clk/100 * 115764/100));
                } else {
-                       volt_without_cks = (uint32_t)((2416794800 + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
-                                               (2625416 - (sclk_table->entries[i].clk/100) * 12586807/10000));
-                       volt_with_cks = (uint32_t)((2999656000 + sclk_table->entries[i].clk * 392803/100 - (ro - 44) * 1000000) / \
-                                       (3422454 - sclk_table->entries[i].clk/100 * 18886376/10000));
+                       volt_without_cks = (uint32_t)((2416794800U + (sclk_table->entries[i].clk/100) * 1476925/10 -(ro - 50) * 1000000) / \
+                                               (2625416 - (sclk_table->entries[i].clk/100) * (12586807/10000)));
+                       volt_with_cks = (uint32_t)((2999656000U - sclk_table->entries[i].clk/100 * 392803 - (ro - 44) * 1000000) / \
+                                       (3422454 - sclk_table->entries[i].clk/100 * (18886376/10000)));
                }
 
                if (volt_without_cks >= volt_with_cks)
-                       volt_offset = (uint8_t)CEILING_UCHAR((volt_without_cks - volt_with_cks +
-                                       sclk_table->entries[i].cks_voffset) * 100 / 625);
+                       volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
+                                       sclk_table->entries[i].cks_voffset) * 100 + 624) / 625);
 
                data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
        }
@@ -2685,7 +2682,7 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        uint16_t vv_id;
-       uint16_t vddc = 0;
+       uint32_t vddc = 0;
        uint16_t i, j;
        uint32_t sclk = 0;
        struct phm_ppt_v1_information *table_info =
@@ -2716,8 +2713,9 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
                                                continue);
 
 
-                       /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
-                       PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
+                       /* need to make sure vddc is less than 2v or else, it could burn the ASIC.
+                        * real voltage level in unit of 0.01mv */
+                       PP_ASSERT_WITH_CODE((vddc < 200000 && vddc != 0),
                                        "Invalid VDDC value", result = -EINVAL;);
 
                        /* the voltage should not be zero nor equal to leakage ID */
index bf4e18fd38724e3f285a58a4e7595d481fc33411..90b35c5c10a4f01b0c584ccb3dbb30c482208470 100644 (file)
@@ -1256,7 +1256,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
 }
 
 int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-                               uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage)
+                               uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage)
 {
 
        int result;
@@ -1274,7 +1274,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
        if (0 != result)
                return result;
 
-       *voltage = get_voltage_info_param_space.usVoltageLevel;
+       *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel;
 
        return result;
 }
index 248c5db5f38025559d345d311f1cfe3d7e1eb1ad..1e35a9625baf913ab8733604ad3a67a4e86ee2e4 100644 (file)
@@ -305,7 +305,7 @@ extern int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t
 extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
                                                                uint8_t level);
 extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
-                               uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
+                               uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage);
 extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
 
 extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
index 233eb7f36c1da19e76689d430f83e4cc12e1fc9d..5d0f655bf160b85582383d8fcca771af00d43ae7 100644 (file)
@@ -1302,7 +1302,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                        table->Smio[count] |=
                                data->mvdd_voltage_table.entries[count].smio_low;
                }
-               table->SmioMask2 = data->vddci_voltage_table.mask_low;
+               table->SmioMask2 = data->mvdd_voltage_table.mask_low;
 
                CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
        }
index 671fdb4d615a4812d624c30daa20e8192e159346..dccc859f638c2b8f4769e4f699f7742ca9432240 100644 (file)
@@ -302,7 +302,7 @@ static int init_dpm_2_parameters(
                        (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
 
                if (0 != powerplay_table->usPPMTableOffset) {
-                       if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
+                       if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
                                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_EnablePlatformPowerManagement);
                        }
index f313b4d8344f4e6abf8f0784ae9bf98abdc5eb9e..85c4debf47e04ebc6ed5300a2974b037a227376d 100644 (file)
@@ -512,6 +512,10 @@ void intel_detect_pch(struct drm_device *dev)
                                DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
                                WARN_ON(!IS_SKYLAKE(dev) &&
                                        !IS_KABYLAKE(dev));
+                       } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+                               dev_priv->pch_type = PCH_KBP;
+                               DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+                               WARN_ON(!IS_KABYLAKE(dev));
                        } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
                                   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
                                   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
index 7c334e9022660340fb826ff627368dab7b594f00..bc3f2e6842e7be13771fa4b638c9cc85f36ad359 100644 (file)
@@ -990,6 +990,7 @@ enum intel_pch {
        PCH_CPT,        /* Cougarpoint PCH */
        PCH_LPT,        /* Lynxpoint PCH */
        PCH_SPT,        /* Sunrisepoint PCH */
+       PCH_KBP,        /* Kabypoint PCH */
        PCH_NOP,
 };
 
@@ -2600,6 +2601,15 @@ struct drm_i915_cmd_table {
 
 #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
 
+#define KBL_REVID_A0           0x0
+#define KBL_REVID_B0           0x1
+#define KBL_REVID_C0           0x2
+#define KBL_REVID_D0           0x3
+#define KBL_REVID_E0           0x4
+
+#define IS_KBL_REVID(p, since, until) \
+       (IS_KABYLAKE(p) && IS_REVID(p, since, until))
+
 /*
  * The genX designation typically refers to the render engine, so render
  * capability related checks should use IS_GEN, while display and other checks
@@ -2708,11 +2718,13 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE                0x9c00
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE           0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE                0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE           0xA200
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE           0x7100
 #define INTEL_PCH_P3X_DEVICE_ID_TYPE           0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE          0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
index 425e721aac58e5cd0d7dd3d80f984f118fcfc960..66571466e9a88d6380ddc0f6d668c4240488d90a 100644 (file)
@@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
        if (!mutex_is_locked(mutex))
                return false;
 
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
        return mutex->owner == task;
 #else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
index b7ce963fb8f8d23345cab6cf0b6e4e446e6e1b3c..44004e3f09e446d108f635444ed9c67818deed7f 100644 (file)
@@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
                return -ENODEV;
 
        /* See the comment at the drm_mm_init() call for more about this check.
-        * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
-       if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+        * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+        */
+       if (start < 4096 && (IS_GEN8(dev_priv) ||
+                            IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
                start = 4096;
 
        mutex_lock(&dev_priv->mm.stolen_lock);
index 2f6fd33c07ba2aaabbce9764cf4a66178637199c..aab47f7bb61b9aae2478cc22e03a8406ea6cd64c 100644 (file)
@@ -2471,7 +2471,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
                        I915_WRITE(SDEIIR, iir);
                        ret = IRQ_HANDLED;
 
-                       if (HAS_PCH_SPT(dev_priv))
+                       if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
                                spt_irq_handler(dev, iir);
                        else
                                cpt_irq_handler(dev, iir);
@@ -4661,7 +4661,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
                dev->driver->disable_vblank = gen8_disable_vblank;
                if (IS_BROXTON(dev))
                        dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
-               else if (HAS_PCH_SPT(dev))
+               else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
                        dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
                else
                        dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
index b407411e31ba8e649ccbec2255888a976fffde2c..3fcf7dd5b6ca5585a48f921d287b863b126a80dd 100644 (file)
@@ -220,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   ECOCHK_PPGTT_WT_HSW          (0x2<<3)
 #define   ECOCHK_PPGTT_WB_HSW          (0x3<<3)
 
+#define GEN8_CONFIG0                   _MMIO(0xD00)
+#define  GEN9_DEFAULT_FIXES            (1 << 3 | 1 << 2 | 1 << 1)
+
 #define GAC_ECO_BITS                   _MMIO(0x14090)
 #define   ECOBITS_SNB_BIT              (1<<13)
 #define   ECOBITS_PPGTT_CACHE64B       (3<<8)
@@ -1669,6 +1672,9 @@ enum skl_disp_power_wells {
 
 #define GEN7_TLB_RD_ADDR       _MMIO(0x4700)
 
+#define GAMT_CHKN_BIT_REG      _MMIO(0x4ab8)
+#define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING     (1<<28)
+
 #if 0
 #define PRB0_TAIL      _MMIO(0x2030)
 #define PRB0_HEAD      _MMIO(0x2034)
@@ -1804,6 +1810,10 @@ enum skl_disp_power_wells {
 #define   GEN9_IZ_HASHING_MASK(slice)                  (0x3 << ((slice) * 2))
 #define   GEN9_IZ_HASHING(slice, val)                  ((val) << ((slice) * 2))
 
+/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
+#define GEN9_CSFE_CHICKEN1_RCS _MMIO(0x20D4)
+#define   GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2)
+
 /* WaClearTdlStateAckDirtyBits */
 #define GEN8_STATE_ACK         _MMIO(0x20F0)
 #define GEN9_STATE_ACK_SLICE1  _MMIO(0x20F8)
@@ -2200,6 +2210,8 @@ enum skl_disp_power_wells {
 #define ILK_DPFC_STATUS                _MMIO(0x43210)
 #define ILK_DPFC_FENCE_YOFF    _MMIO(0x43218)
 #define ILK_DPFC_CHICKEN       _MMIO(0x43224)
+#define   ILK_DPFC_DISABLE_DUMMY0 (1<<8)
+#define   ILK_DPFC_NUKE_ON_ANY_MODIFICATION    (1<<23)
 #define ILK_FBC_RT_BASE                _MMIO(0x2128)
 #define   ILK_FBC_RT_VALID     (1<<0)
 #define   SNB_FBC_FRONT_BUFFER (1<<1)
@@ -6031,6 +6043,7 @@ enum skl_disp_power_wells {
 #define CHICKEN_PAR1_1         _MMIO(0x42080)
 #define  DPA_MASK_VBLANK_SRD   (1 << 15)
 #define  FORCE_ARB_IDLE_PLANES (1 << 14)
+#define  SKL_EDP_PSR_FIX_RDWRAP        (1 << 3)
 
 #define _CHICKEN_PIPESL_1_A    0x420b0
 #define _CHICKEN_PIPESL_1_B    0x420b4
@@ -6039,6 +6052,7 @@ enum skl_disp_power_wells {
 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
 #define DISP_ARB_CTL   _MMIO(0x45000)
+#define  DISP_FBC_MEMORY_WAKE          (1<<31)
 #define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
 #define  DISP_FBC_WM_DIS               (1<<15)
 #define DISP_ARB_CTL2  _MMIO(0x45004)
@@ -6052,6 +6066,9 @@ enum skl_disp_power_wells {
 #define HSW_NDE_RSTWRN_OPT     _MMIO(0x46408)
 #define  RESET_PCH_HANDSHAKE_ENABLE    (1<<4)
 
+#define GEN8_CHICKEN_DCPR_1            _MMIO(0x46430)
+#define   MASK_WAKEMEM                 (1<<13)
+
 #define SKL_DFSM                       _MMIO(0x51000)
 #define SKL_DFSM_CDCLK_LIMIT_MASK      (3 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_675       (0 << 23)
@@ -6069,6 +6086,7 @@ enum skl_disp_power_wells {
 #define  GEN9_TSG_BARRIER_ACK_DISABLE          (1<<8)
 
 #define GEN9_CS_DEBUG_MODE1            _MMIO(0x20ec)
+#define GEN9_CTX_PREEMPT_REG           _MMIO(0x2248)
 #define GEN8_CS_CHICKEN1               _MMIO(0x2580)
 
 /* GEN7 chicken */
@@ -6076,6 +6094,7 @@ enum skl_disp_power_wells {
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC     ((1<<10) | (1<<26))
 # define GEN9_RHWO_OPTIMIZATION_DISABLE                (1<<14)
 #define COMMON_SLICE_CHICKEN2                  _MMIO(0x7014)
+# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE  (1<<0)
 
 #define HIZ_CHICKEN                                    _MMIO(0x7018)
@@ -6921,6 +6940,7 @@ enum skl_disp_power_wells {
 #define    EDRAM_SETS_IDX(cap)                 (((cap) >> 8) & 0x3)
 
 #define GEN6_UCGCTL1                           _MMIO(0x9400)
+# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE               (1 << 22)
 # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE             (1 << 16)
 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE               (1 << 5)
 # define GEN6_CSUNIT_CLOCK_GATE_DISABLE                        (1 << 7)
@@ -6937,6 +6957,7 @@ enum skl_disp_power_wells {
 
 #define GEN7_UCGCTL4                           _MMIO(0x940c)
 #define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE      (1<<25)
+#define  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE     (1<<14)
 
 #define GEN6_RCGCTL1                           _MMIO(0x9410)
 #define GEN6_RCGCTL2                           _MMIO(0x9414)
index a34c23eceba0448bb93b5afdb338a82db90e179f..2b3b428d9cd2b01e4d5d3627e89e712b18937547 100644 (file)
  * be moved to FW_FAILED.
  */
 
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 1)
+
 #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 23)
+
 #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
 
 #define FIRMWARE_URL  "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
 
-MODULE_FIRMWARE(I915_CSR_SKL);
-MODULE_FIRMWARE(I915_CSR_BXT);
 
-#define SKL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 23)
-#define BXT_CSR_VERSION_REQUIRED       CSR_VERSION(1, 7)
+
 
 #define CSR_MAX_FW_SIZE                        0x2FFF
 #define CSR_DEFAULT_FW_OFFSET          0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
        char substepping;
 };
 
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
 static const struct stepping_info kbl_stepping_info[] = {
-       {'H', '0'}, {'I', '0'}
+       {'A', '0'}, {'B', '0'}, {'C', '0'},
+       {'D', '0'}, {'E', '0'}, {'F', '0'},
+       {'G', '0'}, {'H', '0'}, {'I', '0'},
 };
 
 static const struct stepping_info skl_stepping_info[] = {
@@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
        csr->version = css_header->version;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_KABYLAKE(dev_priv)) {
+               required_min_version = KBL_CSR_VERSION_REQUIRED;
+       } else if (IS_SKYLAKE(dev_priv)) {
                required_min_version = SKL_CSR_VERSION_REQUIRED;
        } else if (IS_BROXTON(dev_priv)) {
                required_min_version = BXT_CSR_VERSION_REQUIRED;
@@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
        if (!HAS_CSR(dev_priv))
                return;
 
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+       if (IS_KABYLAKE(dev_priv))
+               csr->fw_path = I915_CSR_KBL;
+       else if (IS_SKYLAKE(dev_priv))
                csr->fw_path = I915_CSR_SKL;
        else if (IS_BROXTON(dev_priv))
                csr->fw_path = I915_CSR_BXT;
index 04452cf3eae862836da54cc08b2df9f0d2d6a9fe..3074c56a643d46436b4ce6e626407638c118fbb0 100644 (file)
@@ -11997,6 +11997,12 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
                ret = intel_color_check(crtc, crtc_state);
                if (ret)
                        return ret;
+
+               /*
+                * Changing color management on Intel hardware is
+                * handled as part of planes update.
+                */
+               crtc_state->planes_changed = true;
        }
 
        ret = 0;
index 40745e38d4382df559d3d3106bf7278324dfbb5b..891107f92d9fa7e8de0aebe02e44e256e45bb4e1 100644 (file)
@@ -4645,7 +4645,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
 
        intel_dp->detect_done = false;
 
-       if (intel_connector->detect_edid)
+       if (is_edp(intel_dp) || intel_connector->detect_edid)
                return connector_status_connected;
        else
                return connector_status_disconnected;
index 42eac37de047b31b1b496a86ac8bf717569e3cfe..7f2d8415ed8b219b6ff5a2c16bf4c24fde66b90c 100644 (file)
@@ -1103,15 +1103,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                uint32_t *const batch,
                                                uint32_t index)
 {
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
        uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
        /*
-        * WaDisableLSQCROPERFforOCL:skl
+        * WaDisableLSQCROPERFforOCL:skl,kbl
         * This WA is implemented in skl_init_clock_gating() but since
         * this batch updates GEN8_L3SQCREG4 with default value we need to
         * set this bit here to retain the WA during flush.
         */
-       if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+       if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
+           IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
                l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1273,6 +1275,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 {
        int ret;
        struct drm_device *dev = engine->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
        /* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1286,6 +1289,22 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
                return ret;
        index = ret;
 
+       /* WaClearSlmSpaceAtContextSwitch:kbl */
+       /* Actual scratch location is at 128 bytes offset */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
+               uint32_t scratch_addr
+                       = engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+               wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+               wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+                                          PIPE_CONTROL_GLOBAL_GTT_IVB |
+                                          PIPE_CONTROL_CS_STALL |
+                                          PIPE_CONTROL_QW_WRITE));
+               wa_ctx_emit(batch, index, scratch_addr);
+               wa_ctx_emit(batch, index, 0);
+               wa_ctx_emit(batch, index, 0);
+               wa_ctx_emit(batch, index, 0);
+       }
        /* Pad to end of cacheline */
        while (index % CACHELINE_DWORDS)
                wa_ctx_emit(batch, index, MI_NOOP);
@@ -1687,9 +1706,10 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct intel_engine_cs *engine = ringbuf->engine;
        u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
-       bool vf_flush_wa = false;
+       bool vf_flush_wa = false, dc_flush_wa = false;
        u32 flags = 0;
        int ret;
+       int len;
 
        flags |= PIPE_CONTROL_CS_STALL;
 
@@ -1716,9 +1736,21 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
                 */
                if (IS_GEN9(engine->dev))
                        vf_flush_wa = true;
+
+               /* WaForGAMHang:kbl */
+               if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
+                       dc_flush_wa = true;
        }
 
-       ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
+       len = 6;
+
+       if (vf_flush_wa)
+               len += 6;
+
+       if (dc_flush_wa)
+               len += 12;
+
+       ret = intel_ring_begin(request, len);
        if (ret)
                return ret;
 
@@ -1731,12 +1763,31 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
                intel_logical_ring_emit(ringbuf, 0);
        }
 
+       if (dc_flush_wa) {
+               intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+               intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+       }
+
        intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
        intel_logical_ring_emit(ringbuf, flags);
        intel_logical_ring_emit(ringbuf, scratch_addr);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, 0);
+
+       if (dc_flush_wa) {
+               intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+               intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+               intel_logical_ring_emit(ringbuf, 0);
+       }
+
        intel_logical_ring_advance(ringbuf);
 
        return 0;
index 99e26034ae8d0df783d706839b67b0fd2f161585..16e209d326b69469278fd93f47e8d7e26237a29c 100644 (file)
@@ -1038,5 +1038,16 @@ intel_opregion_get_panel_type(struct drm_device *dev)
                return -ENODEV;
        }
 
+       /*
+        * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
+        * low vswing for eDP, whereas the VBT panel type (2) gives us normal
+        * vswing instead. Low vswing results in some display flickers, so
+        * let's simply ignore the OpRegion panel type on SKL for now.
+        */
+       if (IS_SKYLAKE(dev)) {
+               DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+               return -ENODEV;
+       }
+
        return ret - 1;
 }
index 8357d571553a56471ff42ab0f048bab7f52f475f..aba94099886bf4ce9f402e6c253dc767636d3d56 100644 (file)
@@ -1731,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
                panel->backlight.set = bxt_set_backlight;
                panel->backlight.get = bxt_get_backlight;
                panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
-       } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+       } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+                  HAS_PCH_KBP(dev_priv)) {
                panel->backlight.setup = lpt_setup_backlight;
                panel->backlight.enable = lpt_enable_backlight;
                panel->backlight.disable = lpt_disable_backlight;
index a7ef45da0a9e8dc517f13a66f0f6429aa6334609..2863b92c9da6d157bd45bdf4760b514e403899a6 100644 (file)
 #define INTEL_RC6p_ENABLE                      (1<<1)
 #define INTEL_RC6pp_ENABLE                     (1<<2)
 
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
+       I915_WRITE(CHICKEN_PAR1_1,
+                  I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+
+       I915_WRITE(GEN8_CONFIG0,
+                  I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
+
+       /* WaEnableChickenDCPR:skl,bxt,kbl */
+       I915_WRITE(GEN8_CHICKEN_DCPR_1,
+                  I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+
+       /* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
+       /* WaFbcWakeMemOn:skl,bxt,kbl */
+       I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+                  DISP_FBC_WM_DIS |
+                  DISP_FBC_MEMORY_WAKE);
+
+       /* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
+       I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+                  ILK_DPFC_DISABLE_DUMMY0);
+}
+
 static void bxt_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       gen9_init_clock_gating(dev);
+
        /* WaDisableSDEUnitClockGating:bxt */
        I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
                   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -6698,6 +6726,38 @@ static void lpt_suspend_hw(struct drm_device *dev)
        }
 }
 
+static void kabylake_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       gen9_init_clock_gating(dev);
+
+       /* WaDisableSDEUnitClockGating:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+                          GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaDisableGamClockGating:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+                          GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaFbcNukeOnHostModify:kbl */
+       I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+                  ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void skylake_init_clock_gating(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       gen9_init_clock_gating(dev);
+
+       /* WaFbcNukeOnHostModify:skl */
+       I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+                  ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
 static void broadwell_init_clock_gating(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7163,9 +7223,9 @@ static void nop_init_clock_gating(struct drm_device *dev)
 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 {
        if (IS_SKYLAKE(dev_priv))
-               dev_priv->display.init_clock_gating = nop_init_clock_gating;
+               dev_priv->display.init_clock_gating = skylake_init_clock_gating;
        else if (IS_KABYLAKE(dev_priv))
-               dev_priv->display.init_clock_gating = nop_init_clock_gating;
+               dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
        else if (IS_BROXTON(dev_priv))
                dev_priv->display.init_clock_gating = bxt_init_clock_gating;
        else if (IS_BROADWELL(dev_priv))
index 04402bb9d26b9e96cac9225b37d31587891521f6..68c5af079ef85fd1cfc7ba23338f475f075454c6 100644 (file)
@@ -913,24 +913,26 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
        struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       uint32_t tmp;
        int ret;
 
-       /* WaEnableLbsSlaRetryTimerDecrement:skl */
+       /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+       I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+       /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
                   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
 
-       /* WaDisableKillLogic:bxt,skl */
+       /* WaDisableKillLogic:bxt,skl,kbl */
        I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
                   ECOCHK_DIS_TLB);
 
-       /* WaClearFlowControlGpgpuContextSave:skl,bxt */
-       /* WaDisablePartialInstShootdown:skl,bxt */
+       /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
+       /* WaDisablePartialInstShootdown:skl,bxt,kbl */
        WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                          FLOW_CONTROL_ENABLE |
                          PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
-       /* Syncing dependencies between camera and graphics:skl,bxt */
+       /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                          GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
@@ -952,18 +954,18 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
                 */
        }
 
-       /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
-       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
+       /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+       /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
        WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
                          GEN9_ENABLE_YV12_BUGFIX |
                          GEN9_ENABLE_GPGPU_PREEMPTION);
 
-       /* Wa4x4STCOptimizationDisable:skl,bxt */
-       /* WaDisablePartialResolveInVc:skl,bxt */
+       /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+       /* WaDisablePartialResolveInVc:skl,bxt,kbl */
        WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
                                         GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
 
-       /* WaCcsTlbPrefetchDisable:skl,bxt */
+       /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
        WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                          GEN9_CCS_TLB_PREFETCH_ENABLE);
 
@@ -973,31 +975,57 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
                WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
                                  PIXEL_MASK_CAMMING_DISABLE);
 
-       /* WaForceContextSaveRestoreNonCoherent:skl,bxt */
-       tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-       if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
-           IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
-               tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
-       WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+       /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+                         HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+       /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+        * both tied to WaForceContextSaveRestoreNonCoherent
+        * in some hsds for skl. We keep the tie for all gen9. The
+        * documentation is a bit hazy and so we want to get common behaviour,
+        * even though there is no clear evidence we would need both on kbl/bxt.
+        * This area has been source of system hangs so we play it safe
+        * and mimic the skl regardless of what bspec says.
+        *
+        * Use Force Non-Coherent whenever executing a 3D context. This
+        * is a workaround for a possible hang in the unlikely event
+        * a TLB invalidation occurs during a PSD flush.
+        */
 
-       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-       if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+       /* WaForceEnableNonCoherent:skl,bxt,kbl */
+       WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                         HDC_FORCE_NON_COHERENT);
+
+       /* WaDisableHDCInvalidation:skl,bxt,kbl */
+       I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+                  BDW_DISABLE_HDC_INVALIDATION);
+
+       /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+       if (IS_SKYLAKE(dev_priv) ||
+           IS_KABYLAKE(dev_priv) ||
+           IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
                WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
-       /* WaDisableSTUnitPowerOptimization:skl,bxt */
+       /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
-       /* WaOCLCoherentLineFlush:skl,bxt */
+       /* WaOCLCoherentLineFlush:skl,bxt,kbl */
        I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
                                    GEN8_LQSC_FLUSH_COHERENT_LINES));
 
-       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+       /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+       ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+       if (ret)
+               return ret;
+
+       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
        ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
        if (ret)
                return ret;
 
-       /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+       /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
        ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
        if (ret)
                return ret;
@@ -1092,22 +1120,6 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-       /* This is tied to WaForceContextSaveRestoreNonCoherent */
-       if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
-               /*
-                *Use Force Non-Coherent whenever executing a 3D context. This
-                * is a workaround for a possible hang in the unlikely event
-                * a TLB invalidation occurs during a PSD flush.
-                */
-               /* WaForceEnableNonCoherent:skl */
-               WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                                 HDC_FORCE_NON_COHERENT);
-
-               /* WaDisableHDCInvalidation:skl */
-               I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-                          BDW_DISABLE_HDC_INVALIDATION);
-       }
-
        /* WaBarrierPerformanceFixDisable:skl */
        if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
@@ -1120,6 +1132,9 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
+       /* WaDisableGafsUnitClkGating:skl */
+       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
        /* WaDisableLSQCROPERFforOCL:skl */
        ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
        if (ret)
@@ -1174,6 +1189,63 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
                        return ret;
        }
 
+       /* WaInsertDummyPushConstPs:bxt */
+       if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+       return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+       struct drm_i915_private *dev_priv = engine->dev->dev_private;
+       int ret;
+
+       ret = gen9_init_workarounds(engine);
+       if (ret)
+               return ret;
+
+       /* WaEnableGapsTsvCreditFix:kbl */
+       I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+                                  GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+       /* WaDisableDynamicCreditSharing:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               WA_SET_BIT(GAMT_CHKN_BIT_REG,
+                          GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+       /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+       if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+               WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                                 HDC_FENCE_DEST_SLM_DISABLE);
+
+       /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+        * involving this register should also be added to WA batch as required.
+        */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+               /* WaDisableLSQCROPERFforOCL:kbl */
+               I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+                          GEN8_LQSC_RO_PERF_DIS);
+
+       /* WaInsertDummyPushConstPs:kbl */
+       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+               WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                                 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+       /* WaDisableGafsUnitClkGating:kbl */
+       WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+       /* WaDisableSbeCacheDispatchPortSharing:kbl */
+       WA_SET_BIT_MASKED(
+               GEN7_HALF_SLICE_CHICKEN1,
+               GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+       /* WaDisableLSQCROPERFforOCL:kbl */
+       ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
@@ -1199,6 +1271,9 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
        if (IS_BROXTON(dev))
                return bxt_init_workarounds(engine);
 
+       if (IS_KABYLAKE(dev_priv))
+               return kbl_init_workarounds(engine);
+
        return 0;
 }
 
index 22706c0a54b53a9f95a1ad7e453217b7276cfcaf..49bd5da194e1b368b0d069825e6fb03b03166684 100644 (file)
@@ -40,7 +40,8 @@ static int
 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 {
        struct nvkm_device *device = outp->base.disp->engine.subdev.device;
-       nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern);
+       const u32 soff = gf119_sor_soff(outp);
+       nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
        return 0;
 }
 
index 4182a21f5923dddbc06d2042bb82837f0f7ab0f5..41cacecbea9a85e280a6978ed41b91752a3bcced 100644 (file)
@@ -65,6 +65,14 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
        DRM_DEBUG_DRIVER("Disabling the CRTC\n");
 
        sun4i_tcon_disable(drv->tcon);
+
+       if (crtc->state->event && !crtc->state->active) {
+               spin_lock_irq(&crtc->dev->event_lock);
+               drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               spin_unlock_irq(&crtc->dev->event_lock);
+
+               crtc->state->event = NULL;
+       }
 }
 
 static void sun4i_crtc_enable(struct drm_crtc *crtc)
index 257d2b4f36456a10e0d4bdabd4407deb1aa2b581..937394cbc24122372518961e6b343abe9ab3a680 100644 (file)
@@ -92,7 +92,7 @@ static struct drm_driver sun4i_drv_driver = {
        /* Frame Buffer Operations */
 
        /* VBlank Operations */
-       .get_vblank_counter     = drm_vblank_count,
+       .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = sun4i_drv_enable_vblank,
        .disable_vblank         = sun4i_drv_disable_vblank,
 };
@@ -310,6 +310,7 @@ static int sun4i_drv_probe(struct platform_device *pdev)
 
                count += sun4i_drv_add_endpoints(&pdev->dev, &match,
                                                pipeline);
+               of_node_put(pipeline);
 
                DRM_DEBUG_DRIVER("Queued %d outputs on pipeline %d\n",
                                 count, i);
index 39386f50af876fd9494f9064384c5d050918d301..a71cf98c655fa7083c932683261c4c0517b25a8a 100644 (file)
@@ -1034,9 +1034,9 @@ out_unlock:
        return ret;
 }
 
-static bool ttm_bo_mem_compat(struct ttm_placement *placement,
-                             struct ttm_mem_reg *mem,
-                             uint32_t *new_flags)
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+                      struct ttm_mem_reg *mem,
+                      uint32_t *new_flags)
 {
        int i;
 
@@ -1068,6 +1068,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
 
        return false;
 }
+EXPORT_SYMBOL(ttm_bo_mem_compat);
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
                        struct ttm_placement *placement,
index 9b078a4939968d8521a96b3f83280f4f06b407bc..0cd889015dc57d5c0d0a8b89f39cd17b10303fbf 100644 (file)
@@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
 {
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
+       uint32_t new_flags;
 
        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
@@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                goto err;
 
-       ret = ttm_bo_validate(bo, placement, interruptible, false);
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, placement, interruptible, false);
+
        if (!ret)
                vmw_bo_pin_reserved(buf, true);
 
@@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 {
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
+       uint32_t new_flags;
 
        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
@@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
        if (unlikely(ret != 0))
                goto err;
 
+       if (buf->pin_count > 0) {
+               ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+               goto out_unreserve;
+       }
+
        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
                              false);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
@@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;
+       uint32_t new_flags;
 
        place = vmw_vram_placement.placement[0];
        place.lpfn = bo->num_pages;
@@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
-           bo->mem.start > 0)
+           bo->mem.start > 0 &&
+           buf->pin_count == 0)
                (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
 
-       ret = ttm_bo_validate(bo, &placement, interruptible, false);
+       if (buf->pin_count > 0)
+               ret = ttm_bo_mem_compat(&placement, &bo->mem,
+                                       &new_flags) == true ? 0 : -EINVAL;
+       else
+               ret = ttm_bo_validate(bo, &placement, interruptible, false);
 
        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
index 9fcd8200d485e61aae3ff8a0cbf4fd4289a6d54b..8d528fcf6e9606d4614c744eb39cc9f47b644e8c 100644 (file)
@@ -233,6 +233,7 @@ static int vmw_force_iommu;
 static int vmw_restrict_iommu;
 static int vmw_force_coherent;
 static int vmw_restrict_dma_mask;
+static int vmw_assume_16bpp;
 
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
@@ -249,6 +250,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
 module_param_named(force_coherent, vmw_force_coherent, int, 0600);
 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
 module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
+MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
+module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
 
 
 static void vmw_print_capabilities(uint32_t capabilities)
@@ -660,6 +663,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
 
+       dev_priv->assume_16bpp = !!vmw_assume_16bpp;
+
        dev_priv->enable_fb = enable_fbdev;
 
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -706,6 +711,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                        vmw_read(dev_priv,
                                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
 
+               /*
+                * Workaround for low memory 2D VMs to compensate for the
+                * allocation taken by fbdev
+                */
+               if (!(dev_priv->capabilities & SVGA_CAP_3D))
+                       mem_size *= 2;
+
                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
                dev_priv->prim_bb_mem =
                        vmw_read(dev_priv,
index 1980e2a28265d195db5002977d39239f6b3251a9..89fb19443a3f50e81a43ce4923e92ce8cf69579a 100644 (file)
@@ -386,6 +386,7 @@ struct vmw_private {
        spinlock_t hw_lock;
        spinlock_t cap_lock;
        bool has_dx;
+       bool assume_16bpp;
 
        /*
         * VGA registers.
index 679a4cb98ee306bb47c5c0097f1aa829c17a7fc8..d2d93959b1198ce41470a1a76a8341553c1ddc6c 100644 (file)
@@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
 
        par->set_fb = &vfb->base;
 
-       if (!par->bo_ptr) {
-               /*
-                * Pin before mapping. Since we don't know in what placement
-                * to pin, call into KMS to do it for us.
-                */
-               ret = vfb->pin(vfb);
-               if (ret) {
-                       DRM_ERROR("Could not pin the fbdev framebuffer.\n");
-                       return ret;
-               }
-
-               ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
-                                 par->vmw_bo->base.num_pages, &par->map);
-               if (ret) {
-                       vfb->unpin(vfb);
-                       DRM_ERROR("Could not map the fbdev framebuffer.\n");
-                       return ret;
-               }
-
-               par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
-       }
-
        return 0;
 }
 
@@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info)
        if (ret)
                goto out_unlock;
 
+       if (!par->bo_ptr) {
+               struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
+
+               /*
+                * Pin before mapping. Since we don't know in what placement
+                * to pin, call into KMS to do it for us.
+                */
+               ret = vfb->pin(vfb);
+               if (ret) {
+                       DRM_ERROR("Could not pin the fbdev framebuffer.\n");
+                       goto out_unlock;
+               }
+
+               ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
+                                 par->vmw_bo->base.num_pages, &par->map);
+               if (ret) {
+                       vfb->unpin(vfb);
+                       DRM_ERROR("Could not map the fbdev framebuffer.\n");
+                       goto out_unlock;
+               }
+
+               par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
+       }
+
+
        vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
                          par->set_fb->width, par->set_fb->height);
 
index 55231cce73a01f047a1d660d3f3192d1f7ba80a6..e29da45a2847ecd28e829ced3b65a8a39622d798 100644 (file)
@@ -1553,14 +1553,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
                DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
        };
        int i;
-       u32 assumed_bpp = 2;
+       u32 assumed_bpp = 4;
 
-       /*
-        * If using screen objects, then assume 32-bpp because that's what the
-        * SVGA device is assuming
-        */
-       if (dev_priv->active_display_unit == vmw_du_screen_object)
-               assumed_bpp = 4;
+       if (dev_priv->assume_16bpp)
+               assumed_bpp = 2;
 
        if (dev_priv->active_display_unit == vmw_du_screen_target) {
                max_width  = min(max_width,  dev_priv->stdu_max_width);
index f0374f9b56cad3a522119f7d4cf8a8940f445bf0..e57a0bad7a626daf505625ce019f2583218a51ab 100644 (file)
@@ -300,6 +300,9 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
                break;
        }
 
+       if (retries == RETRIES)
+               return -EINVAL;
+
        *msg_len = reply_len;
        *msg     = reply;
 
index 9ca818fb034c923c4737ccfedb3ea98002aaae84..41932a7c4f79516da86f9e35d1223c8e033cf248 100644 (file)
@@ -399,8 +399,10 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
 
        WARN_ON_ONCE(!stdu->defined);
 
-       if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
-           new_fb->height == mode->vdisplay)
+       new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
+
+       if (new_vfbs && new_vfbs->surface->base_size.width == mode->hdisplay &&
+           new_vfbs->surface->base_size.height == mode->vdisplay)
                new_content_type = SAME_AS_DISPLAY;
        else if (vfb->dmabuf)
                new_content_type = SEPARATE_DMA;
@@ -444,7 +446,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
                        content_srf.mip_levels[0]     = 1;
                        content_srf.multisample_count = 0;
                } else {
-                       new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
                        content_srf = *new_vfbs->surface;
                }
 
@@ -464,7 +465,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
                        return ret;
                }
        } else if (new_content_type == SAME_AS_DISPLAY) {
-               new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
                new_display_srf = vmw_surface_reference(new_vfbs->surface);
        }
 
index ff940075bb9032714cd2eb1e68e69d5799caf1f9..eaf2f916d48c52c0315baef4f6784b90f59d7df2 100644 (file)
@@ -486,6 +486,18 @@ config SENSORS_FSCHMD
          This driver can also be built as a module.  If so, the module
          will be called fschmd.
 
+config SENSORS_FTSTEUTATES
+       tristate "Fujitsu Technology Solutions sensor chip Teutates"
+       depends on I2C && WATCHDOG
+       select WATCHDOG_CORE
+       help
+         If you say yes here you get support for the Fujitsu Technology
+         Solutions (FTS) sensor chip "Teutates" including support for
+         the integrated watchdog.
+
+         This driver can also be built as a module. If so, the module
+         will be called ftsteutates.
+
 config SENSORS_GL518SM
        tristate "Genesys Logic GL518SM"
        depends on I2C
@@ -645,8 +657,8 @@ config SENSORS_JC42
          temperature sensors, which are used on many DDR3 memory modules for
          mobile devices and servers.  Support will include, but not be limited
          to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
-         MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98, STTS424(E),
-         STTS2002, STTS3000, TSE2002, TSE2004, TS3000, and TS3001.
+         MCP9808, MCP98242, MCP98243, MCP98244, MCP9843, SE97, SE98,
+         STTS424(E), STTS2002, STTS3000, TSE2002, TSE2004, TS3000, and TS3001.
 
          This driver can also be built as a module.  If so, the module
          will be called jc42.
@@ -958,6 +970,7 @@ config SENSORS_LM75
        tristate "National Semiconductor LM75 and compatibles"
        depends on I2C
        depends on THERMAL || !THERMAL_OF
+       select REGMAP_I2C
        help
          If you say yes here you get support for one common type of
          temperature sensor chip, with models including:
@@ -1265,6 +1278,17 @@ config SENSORS_SHT21
          This driver can also be built as a module.  If so, the module
          will be called sht21.
 
+config SENSORS_SHT3x
+       tristate "Sensirion humidity and temperature sensors. SHT3x and compat."
+       depends on I2C
+       select CRC8
+       help
+         If you say yes here you get support for the Sensirion SHT30 and SHT31
+         humidity and temperature sensors.
+
+         This driver can also be built as a module.  If so, the module
+         will be called sht3x.
+
 config SENSORS_SHTC1
        tristate "Sensirion humidity and temperature sensors. SHTC1 and compat."
        depends on I2C
@@ -1514,6 +1538,17 @@ config SENSORS_INA2XX
          This driver can also be built as a module.  If so, the module
          will be called ina2xx.
 
+config SENSORS_INA3221
+       tristate "Texas Instruments INA3221 Triple Power Monitor"
+       depends on I2C
+       select REGMAP_I2C
+       help
+         If you say yes here you get support for the TI INA3221 Triple Power
+         Monitor.
+
+         This driver can also be built as a module.  If so, the module
+         will be called ina3221.
+
 config SENSORS_TC74
        tristate "Microchip TC74"
        depends on I2C
@@ -1538,6 +1573,7 @@ config SENSORS_TMP102
        tristate "Texas Instruments TMP102"
        depends on I2C
        depends on THERMAL || !THERMAL_OF
+       select REGMAP_I2C
        help
          If you say yes here you get support for Texas Instruments TMP102
          sensor chips.
@@ -1561,7 +1597,7 @@ config SENSORS_TMP401
        depends on I2C
        help
          If you say yes here you get support for Texas Instruments TMP401,
-         TMP411, TMP431, TMP432 and TMP435 temperature sensor chips.
+         TMP411, TMP431, TMP432, TMP435, and TMP461 temperature sensor chips.
 
          This driver can also be built as a module.  If so, the module
          will be called tmp401.
index 2ef5b7c4c54ffad958eedcadd0ef225cc93989ec..fe87d2895a97a058838286d1b73d46b0b57940a2 100644 (file)
@@ -62,6 +62,7 @@ obj-$(CONFIG_SENSORS_F71882FG)        += f71882fg.o
 obj-$(CONFIG_SENSORS_F75375S)  += f75375s.o
 obj-$(CONFIG_SENSORS_FAM15H_POWER) += fam15h_power.o
 obj-$(CONFIG_SENSORS_FSCHMD)   += fschmd.o
+obj-$(CONFIG_SENSORS_FTSTEUTATES) += ftsteutates.o
 obj-$(CONFIG_SENSORS_G760A)    += g760a.o
 obj-$(CONFIG_SENSORS_G762)     += g762.o
 obj-$(CONFIG_SENSORS_GL518SM)  += gl518sm.o
@@ -77,6 +78,7 @@ obj-$(CONFIG_SENSORS_IBMPOWERNV)+= ibmpowernv.o
 obj-$(CONFIG_SENSORS_IIO_HWMON) += iio_hwmon.o
 obj-$(CONFIG_SENSORS_INA209)   += ina209.o
 obj-$(CONFIG_SENSORS_INA2XX)   += ina2xx.o
+obj-$(CONFIG_SENSORS_INA3221)  += ina3221.o
 obj-$(CONFIG_SENSORS_IT87)     += it87.o
 obj-$(CONFIG_SENSORS_JC42)     += jc42.o
 obj-$(CONFIG_SENSORS_JZ4740)   += jz4740-hwmon.o
@@ -138,6 +140,7 @@ obj-$(CONFIG_SENSORS_SCH5627)       += sch5627.o
 obj-$(CONFIG_SENSORS_SCH5636)  += sch5636.o
 obj-$(CONFIG_SENSORS_SHT15)    += sht15.o
 obj-$(CONFIG_SENSORS_SHT21)    += sht21.o
+obj-$(CONFIG_SENSORS_SHT3x)    += sht3x.o
 obj-$(CONFIG_SENSORS_SHTC1)    += shtc1.o
 obj-$(CONFIG_SENSORS_SIS5595)  += sis5595.o
 obj-$(CONFIG_SENSORS_SMM665)   += smm665.o
index 202c1fbb3407a7973d8ff993813dbf38dd802d24..8ea35932fbaa2cd648a217c18fddea8cdd47f1a1 100644 (file)
@@ -37,7 +37,6 @@ enum ad7314_variant {
 
 struct ad7314_data {
        struct spi_device       *spi_dev;
-       struct device           *hwmon_dev;
        u16 rx ____cacheline_aligned;
 };
 
@@ -88,62 +87,30 @@ static ssize_t ad7314_show_temperature(struct device *dev,
        }
 }
 
-static ssize_t ad7314_show_name(struct device *dev,
-                               struct device_attribute *devattr, char *buf)
-{
-       return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
-}
-
-static DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL);
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
                          ad7314_show_temperature, NULL, 0);
 
-static struct attribute *ad7314_attributes[] = {
-       &dev_attr_name.attr,
+static struct attribute *ad7314_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        NULL,
 };
 
-static const struct attribute_group ad7314_group = {
-       .attrs = ad7314_attributes,
-};
+ATTRIBUTE_GROUPS(ad7314);
 
 static int ad7314_probe(struct spi_device *spi_dev)
 {
-       int ret;
        struct ad7314_data *chip;
+       struct device *hwmon_dev;
 
        chip = devm_kzalloc(&spi_dev->dev, sizeof(*chip), GFP_KERNEL);
        if (chip == NULL)
                return -ENOMEM;
 
-       spi_set_drvdata(spi_dev, chip);
-
-       ret = sysfs_create_group(&spi_dev->dev.kobj, &ad7314_group);
-       if (ret < 0)
-               return ret;
-
-       chip->hwmon_dev = hwmon_device_register(&spi_dev->dev);
-       if (IS_ERR(chip->hwmon_dev)) {
-               ret = PTR_ERR(chip->hwmon_dev);
-               goto error_remove_group;
-       }
        chip->spi_dev = spi_dev;
-
-       return 0;
-error_remove_group:
-       sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group);
-       return ret;
-}
-
-static int ad7314_remove(struct spi_device *spi_dev)
-{
-       struct ad7314_data *chip = spi_get_drvdata(spi_dev);
-
-       hwmon_device_unregister(chip->hwmon_dev);
-       sysfs_remove_group(&spi_dev->dev.kobj, &ad7314_group);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(&spi_dev->dev,
+                                                          spi_dev->modalias,
+                                                          chip, ad7314_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static const struct spi_device_id ad7314_id[] = {
@@ -159,7 +126,6 @@ static struct spi_driver ad7314_driver = {
                .name = "ad7314",
        },
        .probe = ad7314_probe,
-       .remove = ad7314_remove,
        .id_table = ad7314_id,
 };
 
index 4fd9e4de19723432d172b07508777e629a400252..59bd7b9e1772c3e7fbd72ce20b156cc85182b80f 100644 (file)
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
-#include <linux/mutex.h>
 #include <linux/delay.h>
 
 #define DEVICE_NAME    "ads7871"
 
 struct ads7871_data {
-       struct device   *hwmon_dev;
-       struct mutex    update_lock;
+       struct spi_device *spi;
 };
 
 static int ads7871_read_reg8(struct spi_device *spi, int reg)
@@ -101,7 +99,8 @@ static int ads7871_write_reg8(struct spi_device *spi, int reg, u8 val)
 static ssize_t show_voltage(struct device *dev,
                struct device_attribute *da, char *buf)
 {
-       struct spi_device *spi = to_spi_device(dev);
+       struct ads7871_data *pdata = dev_get_drvdata(dev);
+       struct spi_device *spi = pdata->spi;
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
        int ret, val, i = 0;
        uint8_t channel, mux_cnv;
@@ -139,12 +138,6 @@ static ssize_t show_voltage(struct device *dev,
        }
 }
 
-static ssize_t ads7871_show_name(struct device *dev,
-                                struct device_attribute *devattr, char *buf)
-{
-       return sprintf(buf, "%s\n", to_spi_device(dev)->modalias);
-}
-
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0);
 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1);
 static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2);
@@ -154,9 +147,7 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5);
 static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6);
 static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7);
 
-static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL);
-
-static struct attribute *ads7871_attributes[] = {
+static struct attribute *ads7871_attrs[] = {
        &sensor_dev_attr_in0_input.dev_attr.attr,
        &sensor_dev_attr_in1_input.dev_attr.attr,
        &sensor_dev_attr_in2_input.dev_attr.attr,
@@ -165,21 +156,18 @@ static struct attribute *ads7871_attributes[] = {
        &sensor_dev_attr_in5_input.dev_attr.attr,
        &sensor_dev_attr_in6_input.dev_attr.attr,
        &sensor_dev_attr_in7_input.dev_attr.attr,
-       &dev_attr_name.attr,
        NULL
 };
 
-static const struct attribute_group ads7871_group = {
-       .attrs = ads7871_attributes,
-};
+ATTRIBUTE_GROUPS(ads7871);
 
 static int ads7871_probe(struct spi_device *spi)
 {
-       int ret, err;
+       struct device *dev = &spi->dev;
+       int ret;
        uint8_t val;
        struct ads7871_data *pdata;
-
-       dev_dbg(&spi->dev, "probe\n");
+       struct device *hwmon_dev;
 
        /* Configure the SPI bus */
        spi->mode = (SPI_MODE_0);
@@ -193,7 +181,7 @@ static int ads7871_probe(struct spi_device *spi)
        ads7871_write_reg8(spi, REG_OSC_CONTROL, val);
        ret = ads7871_read_reg8(spi, REG_OSC_CONTROL);
 
-       dev_dbg(&spi->dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
+       dev_dbg(dev, "REG_OSC_CONTROL write:%x, read:%x\n", val, ret);
        /*
         * because there is no other error checking on an SPI bus
         * we need to make sure we really have a chip
@@ -201,46 +189,23 @@ static int ads7871_probe(struct spi_device *spi)
        if (val != ret)
                return -ENODEV;
 
-       pdata = devm_kzalloc(&spi->dev, sizeof(struct ads7871_data),
-                            GFP_KERNEL);
+       pdata = devm_kzalloc(dev, sizeof(struct ads7871_data), GFP_KERNEL);
        if (!pdata)
                return -ENOMEM;
 
-       err = sysfs_create_group(&spi->dev.kobj, &ads7871_group);
-       if (err < 0)
-               return err;
-
-       spi_set_drvdata(spi, pdata);
+       pdata->spi = spi;
 
-       pdata->hwmon_dev = hwmon_device_register(&spi->dev);
-       if (IS_ERR(pdata->hwmon_dev)) {
-               err = PTR_ERR(pdata->hwmon_dev);
-               goto error_remove;
-       }
-
-       return 0;
-
-error_remove:
-       sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
-       return err;
-}
-
-static int ads7871_remove(struct spi_device *spi)
-{
-       struct ads7871_data *pdata = spi_get_drvdata(spi);
-
-       hwmon_device_unregister(pdata->hwmon_dev);
-       sysfs_remove_group(&spi->dev.kobj, &ads7871_group);
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, spi->modalias,
+                                                          pdata,
+                                                          ads7871_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static struct spi_driver ads7871_driver = {
        .driver = {
                .name = DEVICE_NAME,
        },
-
        .probe = ads7871_probe,
-       .remove = ads7871_remove,
 };
 
 module_spi_driver(ads7871_driver);
index 827c037031287637c602f4f3d6ca9c1790686f16..a7f886961830cf9c5ca7ecfaf9d8839ce1accbaf 100644 (file)
@@ -30,6 +30,7 @@
 
 #define ADT7411_REG_CFG1                       0x18
 #define ADT7411_CFG1_START_MONITOR             (1 << 0)
+#define ADT7411_CFG1_RESERVED_BIT3             (1 << 3)
 
 #define ADT7411_REG_CFG2                       0x19
 #define ADT7411_CFG2_DISABLE_AVG               (1 << 5)
@@ -296,8 +297,10 @@ static int adt7411_probe(struct i2c_client *client,
        mutex_init(&data->device_lock);
        mutex_init(&data->update_lock);
 
+       /* According to the datasheet, we must only write 1 to bit 3 */
        ret = adt7411_modify_bit(client, ADT7411_REG_CFG1,
-                                ADT7411_CFG1_START_MONITOR, 1);
+                                ADT7411_CFG1_RESERVED_BIT3
+                                | ADT7411_CFG1_START_MONITOR, 1);
        if (ret < 0)
                return ret;
 
index 2ac87d553e22f67ebaca32d62fb76375e3d22bdc..acf9c0361d9f7e16f639fbbfeac642b1ca5258b5 100644 (file)
@@ -81,6 +81,7 @@ static bool disallow_fan_type_call;
 #define I8K_HWMON_HAVE_TEMP4   (1 << 3)
 #define I8K_HWMON_HAVE_FAN1    (1 << 4)
 #define I8K_HWMON_HAVE_FAN2    (1 << 5)
+#define I8K_HWMON_HAVE_FAN3    (1 << 6)
 
 MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
 MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
@@ -139,6 +140,14 @@ static int i8k_smm(struct smm_regs *regs)
        int eax = regs->eax;
        cpumask_var_t old_mask;
 
+#ifdef DEBUG
+       int ebx = regs->ebx;
+       unsigned long duration;
+       ktime_t calltime, delta, rettime;
+
+       calltime = ktime_get();
+#endif
+
        /* SMM requires CPU 0 */
        if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
                return -ENOMEM;
@@ -210,6 +219,15 @@ static int i8k_smm(struct smm_regs *regs)
 out:
        set_cpus_allowed_ptr(current, old_mask);
        free_cpumask_var(old_mask);
+
+#ifdef DEBUG
+       rettime = ktime_get();
+       delta = ktime_sub(rettime, calltime);
+       duration = ktime_to_ns(delta) >> 10;
+       pr_debug("smm(0x%.4x 0x%.4x) = 0x%.4x  (took %7lu usecs)\n", eax, ebx,
+               (rc ? 0xffff : regs->eax & 0xffff), duration);
+#endif
+
        return rc;
 }
 
@@ -252,7 +270,7 @@ static int _i8k_get_fan_type(int fan)
 static int i8k_get_fan_type(int fan)
 {
        /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
-       static int types[2] = { INT_MIN, INT_MIN };
+       static int types[3] = { INT_MIN, INT_MIN, INT_MIN };
 
        if (types[fan] == INT_MIN)
                types[fan] = _i8k_get_fan_type(fan);
@@ -719,6 +737,12 @@ static SENSOR_DEVICE_ATTR(fan2_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL,
                          1);
 static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
                          i8k_hwmon_set_pwm, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, i8k_hwmon_show_fan, NULL,
+                         2);
+static SENSOR_DEVICE_ATTR(fan3_label, S_IRUGO, i8k_hwmon_show_fan_label, NULL,
+                         2);
+static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, i8k_hwmon_show_pwm,
+                         i8k_hwmon_set_pwm, 2);
 
 static struct attribute *i8k_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,     /* 0 */
@@ -735,6 +759,9 @@ static struct attribute *i8k_attrs[] = {
        &sensor_dev_attr_fan2_input.dev_attr.attr,      /* 11 */
        &sensor_dev_attr_fan2_label.dev_attr.attr,      /* 12 */
        &sensor_dev_attr_pwm2.dev_attr.attr,            /* 13 */
+       &sensor_dev_attr_fan3_input.dev_attr.attr,      /* 14 */
+       &sensor_dev_attr_fan3_label.dev_attr.attr,      /* 15 */
+       &sensor_dev_attr_pwm3.dev_attr.attr,            /* 16 */
        NULL
 };
 
@@ -742,7 +769,7 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
                              int index)
 {
        if (disallow_fan_type_call &&
-           (index == 9 || index == 12))
+           (index == 9 || index == 12 || index == 15))
                return 0;
        if (index >= 0 && index <= 1 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
@@ -762,6 +789,9 @@ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
        if (index >= 11 && index <= 13 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN2))
                return 0;
+       if (index >= 14 && index <= 16 &&
+           !(i8k_hwmon_flags & I8K_HWMON_HAVE_FAN3))
+               return 0;
 
        return attr->mode;
 }
@@ -807,6 +837,13 @@ static int __init i8k_init_hwmon(void)
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
 
+       /* Third fan attributes, if fan status or type is OK */
+       err = i8k_get_fan_status(2);
+       if (err < 0)
+               err = i8k_get_fan_type(2);
+       if (err >= 0)
+               i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN3;
+
        i8k_hwmon_dev = hwmon_device_register_with_groups(NULL, "dell_smm",
                                                          NULL, i8k_groups);
        if (IS_ERR(i8k_hwmon_dev)) {
index ada90716448d196f13cdb75251522f5877d08d99..f37fe2011640c44f2fb4a285da5196b8e79b9be8 100644 (file)
@@ -464,7 +464,7 @@ static int emc6w201_detect(struct i2c_client *client,
        if (verstep < 0 || (verstep & 0xF0) != 0xB0)
                return -ENODEV;
        if ((verstep & 0x0F) > 2) {
-               dev_dbg(&client->dev, "Unknwown EMC6W201 stepping %d\n",
+               dev_dbg(&client->dev, "Unknown EMC6W201 stepping %d\n",
                        verstep & 0x0F);
                return -ENODEV;
        }
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
new file mode 100644 (file)
index 0000000..2b2ff67
--- /dev/null
@@ -0,0 +1,819 @@
+/*
+ * Support for the FTS Systemmonitoring Chip "Teutates"
+ *
+ * Copyright (C) 2016 Fujitsu Technology Solutions GmbH,
+ *               Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/uaccess.h>
+#include <linux/watchdog.h>
+
+#define FTS_DEVICE_ID_REG              0x0000
+#define FTS_DEVICE_REVISION_REG                0x0001
+#define FTS_DEVICE_STATUS_REG          0x0004
+#define FTS_SATELLITE_STATUS_REG       0x0005
+#define FTS_EVENT_STATUS_REG           0x0006
+#define FTS_GLOBAL_CONTROL_REG         0x0007
+
+#define FTS_SENSOR_EVENT_REG           0x0010
+
+#define FTS_FAN_EVENT_REG              0x0014
+#define FTS_FAN_PRESENT_REG            0x0015
+
+#define FTS_POWER_ON_TIME_COUNTER_A    0x007A
+#define FTS_POWER_ON_TIME_COUNTER_B    0x007B
+#define FTS_POWER_ON_TIME_COUNTER_C    0x007C
+
+#define FTS_PAGE_SELECT_REG            0x007F
+
+#define FTS_WATCHDOG_TIME_PRESET       0x000B
+#define FTS_WATCHDOG_CONTROL           0x5081
+
+#define FTS_NO_FAN_SENSORS             0x08
+#define FTS_NO_TEMP_SENSORS            0x10
+#define FTS_NO_VOLT_SENSORS            0x04
+
+static struct i2c_device_id fts_id[] = {
+       { "ftsteutates", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, fts_id);
+
+enum WATCHDOG_RESOLUTION {
+       seconds = 1,
+       minutes = 60
+};
+
+struct fts_data {
+       struct i2c_client *client;
+       /* update sensor data lock */
+       struct mutex update_lock;
+       /* read/write register lock */
+       struct mutex access_lock;
+       unsigned long last_updated; /* in jiffies */
+       struct watchdog_device wdd;
+       enum WATCHDOG_RESOLUTION resolution;
+       bool valid; /* false until following fields are valid */
+
+       u8 volt[FTS_NO_VOLT_SENSORS];
+
+       u8 temp_input[FTS_NO_TEMP_SENSORS];
+       u8 temp_alarm;
+
+       u8 fan_present;
+       u8 fan_input[FTS_NO_FAN_SENSORS]; /* in rps */
+       u8 fan_source[FTS_NO_FAN_SENSORS];
+       u8 fan_alarm;
+};
+
+#define FTS_REG_FAN_INPUT(idx) ((idx) + 0x20)
+#define FTS_REG_FAN_SOURCE(idx) ((idx) + 0x30)
+#define FTS_REG_FAN_CONTROL(idx) (((idx) << 16) + 0x4881)
+
+#define FTS_REG_TEMP_INPUT(idx) ((idx) + 0x40)
+#define FTS_REG_TEMP_CONTROL(idx) (((idx) << 16) + 0x0681)
+
+#define FTS_REG_VOLT(idx) ((idx) + 0x18)
+
+/*****************************************************************************/
+/* I2C Helper functions                                                             */
+/*****************************************************************************/
+static int fts_read_byte(struct i2c_client *client, unsigned short reg)
+{
+       int ret;
+       unsigned char page = reg >> 8;
+       struct fts_data *data = dev_get_drvdata(&client->dev);
+
+       mutex_lock(&data->access_lock);
+
+       dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page);
+       ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page);
+       if (ret < 0)
+               goto error;
+
+       reg &= 0xFF;
+       ret = i2c_smbus_read_byte_data(client, reg);
+       dev_dbg(&client->dev, "read - reg: 0x%.02x: val: 0x%.02x\n", reg, ret);
+
+error:
+       mutex_unlock(&data->access_lock);
+       return ret;
+}
+
+static int fts_write_byte(struct i2c_client *client, unsigned short reg,
+                         unsigned char value)
+{
+       int ret;
+       unsigned char page = reg >> 8;
+       struct fts_data *data = dev_get_drvdata(&client->dev);
+
+       mutex_lock(&data->access_lock);
+
+       dev_dbg(&client->dev, "page select - page: 0x%.02x\n", page);
+       ret = i2c_smbus_write_byte_data(client, FTS_PAGE_SELECT_REG, page);
+       if (ret < 0)
+               goto error;
+
+       reg &= 0xFF;
+       dev_dbg(&client->dev,
+               "write - reg: 0x%.02x: val: 0x%.02x\n", reg, value);
+       ret = i2c_smbus_write_byte_data(client, reg, value);
+
+error:
+       mutex_unlock(&data->access_lock);
+       return ret;
+}
+
+/*****************************************************************************/
+/* Data Updater Helper function                                                     */
+/*****************************************************************************/
+static int fts_update_device(struct fts_data *data)
+{
+       int i;
+       int err = 0;
+
+       mutex_lock(&data->update_lock);
+       if (!time_after(jiffies, data->last_updated + 2 * HZ) && data->valid)
+               goto exit;
+
+       err = fts_read_byte(data->client, FTS_DEVICE_STATUS_REG);
+       if (err < 0)
+               goto exit;
+
+       data->valid = !!(err & 0x02); /* Data not ready yet */
+       if (unlikely(!data->valid)) {
+               err = -EAGAIN;
+               goto exit;
+       }
+
+       err = fts_read_byte(data->client, FTS_FAN_PRESENT_REG);
+       if (err < 0)
+               goto exit;
+       data->fan_present = err;
+
+       err = fts_read_byte(data->client, FTS_FAN_EVENT_REG);
+       if (err < 0)
+               goto exit;
+       data->fan_alarm = err;
+
+       for (i = 0; i < FTS_NO_FAN_SENSORS; i++) {
+               if (data->fan_present & BIT(i)) {
+                       err = fts_read_byte(data->client, FTS_REG_FAN_INPUT(i));
+                       if (err < 0)
+                               goto exit;
+                       data->fan_input[i] = err;
+
+                       err = fts_read_byte(data->client,
+                                           FTS_REG_FAN_SOURCE(i));
+                       if (err < 0)
+                               goto exit;
+                       data->fan_source[i] = err;
+               } else {
+                       data->fan_input[i] = 0;
+                       data->fan_source[i] = 0;
+               }
+       }
+
+       err = fts_read_byte(data->client, FTS_SENSOR_EVENT_REG);
+       if (err < 0)
+               goto exit;
+       data->temp_alarm = err;
+
+       for (i = 0; i < FTS_NO_TEMP_SENSORS; i++) {
+               err = fts_read_byte(data->client, FTS_REG_TEMP_INPUT(i));
+               if (err < 0)
+                       goto exit;
+               data->temp_input[i] = err;
+       }
+
+       for (i = 0; i < FTS_NO_VOLT_SENSORS; i++) {
+               err = fts_read_byte(data->client, FTS_REG_VOLT(i));
+               if (err < 0)
+                       goto exit;
+               data->volt[i] = err;
+       }
+       data->last_updated = jiffies;
+       err = 0;
+exit:
+       mutex_unlock(&data->update_lock);
+       return err;
+}
+
+/*****************************************************************************/
+/* Watchdog functions                                                       */
+/*****************************************************************************/
+static int fts_wd_set_resolution(struct fts_data *data,
+                                enum WATCHDOG_RESOLUTION resolution)
+{
+       int ret;
+
+       if (data->resolution == resolution)
+               return 0;
+
+       ret = fts_read_byte(data->client, FTS_WATCHDOG_CONTROL);
+       if (ret < 0)
+               return ret;
+
+       if ((resolution == seconds && ret & BIT(1)) ||
+           (resolution == minutes && (ret & BIT(1)) == 0)) {
+               data->resolution = resolution;
+               return 0;
+       }
+
+       if (resolution == seconds)
+               set_bit(1, (unsigned long *)&ret);
+       else
+               ret &= ~BIT(1);
+
+       ret = fts_write_byte(data->client, FTS_WATCHDOG_CONTROL, ret);
+       if (ret < 0)
+               return ret;
+
+       data->resolution = resolution;
+       return ret;
+}
+
+static int fts_wd_set_timeout(struct watchdog_device *wdd, unsigned int timeout)
+{
+       struct fts_data *data;
+       enum WATCHDOG_RESOLUTION resolution = seconds;
+       int ret;
+
+       data = watchdog_get_drvdata(wdd);
+       /* switch watchdog resolution to minutes if timeout does not fit
+        * into a byte
+        */
+       if (timeout > 0xFF) {
+               timeout = DIV_ROUND_UP(timeout, 60) * 60;
+               resolution = minutes;
+       }
+
+       ret = fts_wd_set_resolution(data, resolution);
+       if (ret < 0)
+               return ret;
+
+       wdd->timeout = timeout;
+       return 0;
+}
+
+static int fts_wd_start(struct watchdog_device *wdd)
+{
+       struct fts_data *data = watchdog_get_drvdata(wdd);
+
+       return fts_write_byte(data->client, FTS_WATCHDOG_TIME_PRESET,
+                             wdd->timeout / (u8)data->resolution);
+}
+
+static int fts_wd_stop(struct watchdog_device *wdd)
+{
+       struct fts_data *data;
+
+       data = watchdog_get_drvdata(wdd);
+       return fts_write_byte(data->client, FTS_WATCHDOG_TIME_PRESET, 0);
+}
+
+static const struct watchdog_info fts_wd_info = {
+       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
+       .identity = "FTS Teutates Hardware Watchdog",
+};
+
+static const struct watchdog_ops fts_wd_ops = {
+       .owner = THIS_MODULE,
+       .start = fts_wd_start,
+       .stop = fts_wd_stop,
+       .set_timeout = fts_wd_set_timeout,
+};
+
+static int fts_watchdog_init(struct fts_data *data)
+{
+       int timeout, ret;
+
+       watchdog_set_drvdata(&data->wdd, data);
+
+       timeout = fts_read_byte(data->client, FTS_WATCHDOG_TIME_PRESET);
+       if (timeout < 0)
+               return timeout;
+
+       /* watchdog not running, set timeout to a default of 60 sec. */
+       if (timeout == 0) {
+               ret = fts_wd_set_resolution(data, seconds);
+               if (ret < 0)
+                       return ret;
+               data->wdd.timeout = 60;
+       } else {
+               ret = fts_read_byte(data->client, FTS_WATCHDOG_CONTROL);
+               if (ret < 0)
+                       return ret;
+
+               data->resolution = ret & BIT(1) ? seconds : minutes;
+               data->wdd.timeout = timeout * (u8)data->resolution;
+               set_bit(WDOG_HW_RUNNING, &data->wdd.status);
+       }
+
+       /* Register our watchdog part */
+       data->wdd.info = &fts_wd_info;
+       data->wdd.ops = &fts_wd_ops;
+       data->wdd.parent = &data->client->dev;
+       data->wdd.min_timeout = 1;
+
+       /* max timeout 255 minutes. */
+       data->wdd.max_hw_heartbeat_ms = 0xFF * 60 * MSEC_PER_SEC;
+
+       return watchdog_register_device(&data->wdd);
+}
+
+/*****************************************************************************/
+/* SysFS handler functions                                                  */
+/*****************************************************************************/
+static ssize_t show_in_value(struct device *dev,
+                            struct device_attribute *devattr, char *buf)
+{
+       struct fts_data *data = dev_get_drvdata(dev);
+       int index = to_sensor_dev_attr(devattr)->index;
+       int err;
+
+       err = fts_update_device(data);
+       if (err < 0)
+               return err;
+
+       return sprintf(buf, "%u\n", data->volt[index]);
+}
+
+/*
+ * sysfs show handler for tempX_input: refresh the cached sensor data and
+ * print the temperature reading of the channel selected by the attribute
+ * index.  Returns bytes written to @buf, or a negative errno on I2C error.
+ */
+static ssize_t show_temp_value(struct device *dev,
+                              struct device_attribute *devattr, char *buf)
+{
+       struct fts_data *data = dev_get_drvdata(dev);
+       int index = to_sensor_dev_attr(devattr)->index;
+       int err;
+
+       err = fts_update_device(data);
+       if (err < 0)
+               return err;
+
+       return sprintf(buf, "%u\n", data->temp_input[index]);
+}
+
+/*
+ * sysfs show handler for tempX_fault: a raw reading of 0 is the chip's
+ * "sensor error" indication, so report 1 (faulty) when the cached value
+ * for this channel is zero and 0 otherwise.
+ */
+static ssize_t show_temp_fault(struct device *dev,
+                              struct device_attribute *devattr, char *buf)
+{
+       struct fts_data *data = dev_get_drvdata(dev);
+       int index = to_sensor_dev_attr(devattr)->index;
+       int err;
+
+       err = fts_update_device(data);
+       if (err < 0)
+               return err;
+
+       /* 00h Temperature = Sensor Error */
+       return sprintf(buf, "%d\n", data->temp_input[index] == 0);
+}
+
+/*
+ * sysfs show handler for tempX_alarm: print 1 when the bit for this
+ * channel is set in the cached temperature alarm bitmask, 0 otherwise.
+ */
+static ssize_t show_temp_alarm(struct device *dev,
+                              struct device_attribute *devattr, char *buf)
+{
+       struct fts_data *data = dev_get_drvdata(dev);
+       int index = to_sensor_dev_attr(devattr)->index;
+       int err;
+
+       err = fts_update_device(data);
+       if (err < 0)
+               return err;
+
+       return sprintf(buf, "%u\n", !!(data->temp_alarm & BIT(index)));
+}
+
+/*
+ * sysfs store handler for tempX_alarm: writing "0" clears the latched
+ * temperature alarm for this channel by setting bit 0 of the channel's
+ * control register.  Any other input is rejected with -EINVAL.
+ *
+ * Fixes vs. original: (1) kstrtoul() was handed a long* instead of an
+ * unsigned long*; (2) on success the function returned 0 (the
+ * fts_write_byte() result) — a sysfs store returning 0 makes userspace
+ * retry the write forever, so return @count instead.
+ */
+static ssize_t
+clear_temp_alarm(struct device *dev, struct device_attribute *devattr,
+                const char *buf, size_t count)
+{
+       struct fts_data *data = dev_get_drvdata(dev);
+       int index = to_sensor_dev_attr(devattr)->index;
+       unsigned long arg;
+       long ret;
+
+       ret = fts_update_device(data);
+       if (ret < 0)
+               return ret;
+
+       /* only a literal "0" is a valid clear request */
+       if (kstrtoul(buf, 10, &arg) || arg != 0)
+               return -EINVAL;
+
+       mutex_lock(&data->update_lock);
+       ret = fts_read_byte(data->client, FTS_REG_TEMP_CONTROL(index));
+       if (ret < 0)
+               goto error;
+
+       /* setting bit 0 of the control register clears the alarm latch */
+       ret = fts_write_byte(data->client, FTS_REG_TEMP_CONTROL(index),
+                            ret | 0x1);
+       if (ret < 0)
+               goto error;
+
+       /* force a re-read of the alarm state on the next update */
+       data->valid = false;
+       ret = count;
+error:
+       mutex_unlock(&data->update_lock);
+       return ret;
+}
+
+/*
+ * sysfs show handler for fanX_input: refresh the cached sensor data and
+ * print the fan reading of the channel selected by the attribute index.
+ */
+static ssize_t show_fan_value(struct device *dev,
+                             struct device_attribute *devattr, char *buf)
+{
+       struct fts_data *data = dev_get_drvdata(dev);
+       int index = to_sensor_dev_attr(devattr)->index;
+       int err;
+
+       err = fts_update_device(data);
+       if (err < 0)
+               return err;
+
+       return sprintf(buf, "%u\n", data->fan_input[index]);
+}
+
+/*
+ * sysfs show handler for fanX_source: print the cached source value the
+ * chip reports for this fan channel (refreshed via fts_update_device()).
+ */
+static ssize_t show_fan_source(struct device *dev,
+                              struct device_attribute *devattr, char *buf)
+{
+       struct fts_data *data = dev_get_drvdata(dev);
+       int index = to_sensor_dev_attr(devattr)->index;
+       int err;
+
+       err = fts_update_device(data);
+       if (err < 0)
+               return err;
+
+       return sprintf(buf, "%u\n", data->fan_source[index]);
+}
+
+/*
+ * sysfs show handler for fanX_alarm: print 1 when the bit for this
+ * channel is set in the cached fan alarm bitmask, 0 otherwise.
+ */
+static ssize_t show_fan_alarm(struct device *dev,
+                             struct device_attribute *devattr, char *buf)
+{
+       struct fts_data *data = dev_get_drvdata(dev);
+       int index = to_sensor_dev_attr(devattr)->index;
+       int err;
+
+       err = fts_update_device(data);
+       if (err < 0)
+               return err;
+
+       return sprintf(buf, "%d\n", !!(data->fan_alarm & BIT(index)));
+}
+
+/*
+ * sysfs store handler for fanX_alarm: writing "0" clears the latched fan
+ * alarm for this channel by setting bit 0 of the channel's control
+ * register.  Any other input is rejected with -EINVAL.
+ *
+ * Fixes vs. original: (1) kstrtoul() was handed a long* instead of an
+ * unsigned long*; (2) on success the function returned 0 (the
+ * fts_write_byte() result) — a sysfs store returning 0 makes userspace
+ * retry the write forever, so return @count instead.
+ */
+static ssize_t
+clear_fan_alarm(struct device *dev, struct device_attribute *devattr,
+               const char *buf, size_t count)
+{
+       struct fts_data *data = dev_get_drvdata(dev);
+       int index = to_sensor_dev_attr(devattr)->index;
+       unsigned long arg;
+       long ret;
+
+       ret = fts_update_device(data);
+       if (ret < 0)
+               return ret;
+
+       /* only a literal "0" is a valid clear request */
+       if (kstrtoul(buf, 10, &arg) || arg != 0)
+               return -EINVAL;
+
+       mutex_lock(&data->update_lock);
+       ret = fts_read_byte(data->client, FTS_REG_FAN_CONTROL(index));
+       if (ret < 0)
+               goto error;
+
+       /* setting bit 0 of the control register clears the alarm latch */
+       ret = fts_write_byte(data->client, FTS_REG_FAN_CONTROL(index),
+                            ret | 0x1);
+       if (ret < 0)
+               goto error;
+
+       /* force a re-read of the alarm state on the next update */
+       data->valid = false;
+       ret = count;
+error:
+       mutex_unlock(&data->update_lock);
+       return ret;
+}
+
+/*****************************************************************************/
+/* SysFS structs                                                            */
+/*****************************************************************************/
+
+/* Temperature sensors */
+static SENSOR_DEVICE_ATTR(temp1_input,  S_IRUGO, show_temp_value, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_input,  S_IRUGO, show_temp_value, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_input,  S_IRUGO, show_temp_value, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_input,  S_IRUGO, show_temp_value, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_input,  S_IRUGO, show_temp_value, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_input,  S_IRUGO, show_temp_value, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_input,  S_IRUGO, show_temp_value, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_input,  S_IRUGO, show_temp_value, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp9_input,  S_IRUGO, show_temp_value, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp10_input, S_IRUGO, show_temp_value, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp11_input, S_IRUGO, show_temp_value, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp12_input, S_IRUGO, show_temp_value, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp13_input, S_IRUGO, show_temp_value, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp14_input, S_IRUGO, show_temp_value, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp15_input, S_IRUGO, show_temp_value, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp16_input, S_IRUGO, show_temp_value, NULL, 15);
+
+static SENSOR_DEVICE_ATTR(temp1_fault,  S_IRUGO, show_temp_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_fault,  S_IRUGO, show_temp_fault, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp3_fault,  S_IRUGO, show_temp_fault, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp4_fault,  S_IRUGO, show_temp_fault, NULL, 3);
+static SENSOR_DEVICE_ATTR(temp5_fault,  S_IRUGO, show_temp_fault, NULL, 4);
+static SENSOR_DEVICE_ATTR(temp6_fault,  S_IRUGO, show_temp_fault, NULL, 5);
+static SENSOR_DEVICE_ATTR(temp7_fault,  S_IRUGO, show_temp_fault, NULL, 6);
+static SENSOR_DEVICE_ATTR(temp8_fault,  S_IRUGO, show_temp_fault, NULL, 7);
+static SENSOR_DEVICE_ATTR(temp9_fault,  S_IRUGO, show_temp_fault, NULL, 8);
+static SENSOR_DEVICE_ATTR(temp10_fault, S_IRUGO, show_temp_fault, NULL, 9);
+static SENSOR_DEVICE_ATTR(temp11_fault, S_IRUGO, show_temp_fault, NULL, 10);
+static SENSOR_DEVICE_ATTR(temp12_fault, S_IRUGO, show_temp_fault, NULL, 11);
+static SENSOR_DEVICE_ATTR(temp13_fault, S_IRUGO, show_temp_fault, NULL, 12);
+static SENSOR_DEVICE_ATTR(temp14_fault, S_IRUGO, show_temp_fault, NULL, 13);
+static SENSOR_DEVICE_ATTR(temp15_fault, S_IRUGO, show_temp_fault, NULL, 14);
+static SENSOR_DEVICE_ATTR(temp16_fault, S_IRUGO, show_temp_fault, NULL, 15);
+
+static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 0);
+static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 1);
+static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 2);
+static SENSOR_DEVICE_ATTR(temp4_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 3);
+static SENSOR_DEVICE_ATTR(temp5_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 4);
+static SENSOR_DEVICE_ATTR(temp6_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 5);
+static SENSOR_DEVICE_ATTR(temp7_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 6);
+static SENSOR_DEVICE_ATTR(temp8_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 7);
+static SENSOR_DEVICE_ATTR(temp9_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 8);
+static SENSOR_DEVICE_ATTR(temp10_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 9);
+static SENSOR_DEVICE_ATTR(temp11_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 10);
+static SENSOR_DEVICE_ATTR(temp12_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 11);
+static SENSOR_DEVICE_ATTR(temp13_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 12);
+static SENSOR_DEVICE_ATTR(temp14_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 13);
+static SENSOR_DEVICE_ATTR(temp15_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 14);
+static SENSOR_DEVICE_ATTR(temp16_alarm, S_IRUGO | S_IWUSR, show_temp_alarm,
+                         clear_temp_alarm, 15);
+
+static struct attribute *fts_temp_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       &sensor_dev_attr_temp2_input.dev_attr.attr,
+       &sensor_dev_attr_temp3_input.dev_attr.attr,
+       &sensor_dev_attr_temp4_input.dev_attr.attr,
+       &sensor_dev_attr_temp5_input.dev_attr.attr,
+       &sensor_dev_attr_temp6_input.dev_attr.attr,
+       &sensor_dev_attr_temp7_input.dev_attr.attr,
+       &sensor_dev_attr_temp8_input.dev_attr.attr,
+       &sensor_dev_attr_temp9_input.dev_attr.attr,
+       &sensor_dev_attr_temp10_input.dev_attr.attr,
+       &sensor_dev_attr_temp11_input.dev_attr.attr,
+       &sensor_dev_attr_temp12_input.dev_attr.attr,
+       &sensor_dev_attr_temp13_input.dev_attr.attr,
+       &sensor_dev_attr_temp14_input.dev_attr.attr,
+       &sensor_dev_attr_temp15_input.dev_attr.attr,
+       &sensor_dev_attr_temp16_input.dev_attr.attr,
+
+       &sensor_dev_attr_temp1_fault.dev_attr.attr,
+       &sensor_dev_attr_temp2_fault.dev_attr.attr,
+       &sensor_dev_attr_temp3_fault.dev_attr.attr,
+       &sensor_dev_attr_temp4_fault.dev_attr.attr,
+       &sensor_dev_attr_temp5_fault.dev_attr.attr,
+       &sensor_dev_attr_temp6_fault.dev_attr.attr,
+       &sensor_dev_attr_temp7_fault.dev_attr.attr,
+       &sensor_dev_attr_temp8_fault.dev_attr.attr,
+       &sensor_dev_attr_temp9_fault.dev_attr.attr,
+       &sensor_dev_attr_temp10_fault.dev_attr.attr,
+       &sensor_dev_attr_temp11_fault.dev_attr.attr,
+       &sensor_dev_attr_temp12_fault.dev_attr.attr,
+       &sensor_dev_attr_temp13_fault.dev_attr.attr,
+       &sensor_dev_attr_temp14_fault.dev_attr.attr,
+       &sensor_dev_attr_temp15_fault.dev_attr.attr,
+       &sensor_dev_attr_temp16_fault.dev_attr.attr,
+
+       &sensor_dev_attr_temp1_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp2_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp3_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp4_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp5_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp6_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp7_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp8_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp9_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp10_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp11_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp12_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp13_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp14_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp15_alarm.dev_attr.attr,
+       &sensor_dev_attr_temp16_alarm.dev_attr.attr,
+       NULL
+};
+
+/* Fans */
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_value, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_value, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan_value, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan_value, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan5_input, S_IRUGO, show_fan_value, NULL, 4);
+static SENSOR_DEVICE_ATTR(fan6_input, S_IRUGO, show_fan_value, NULL, 5);
+static SENSOR_DEVICE_ATTR(fan7_input, S_IRUGO, show_fan_value, NULL, 6);
+static SENSOR_DEVICE_ATTR(fan8_input, S_IRUGO, show_fan_value, NULL, 7);
+
+static SENSOR_DEVICE_ATTR(fan1_source, S_IRUGO, show_fan_source, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan2_source, S_IRUGO, show_fan_source, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan3_source, S_IRUGO, show_fan_source, NULL, 2);
+static SENSOR_DEVICE_ATTR(fan4_source, S_IRUGO, show_fan_source, NULL, 3);
+static SENSOR_DEVICE_ATTR(fan5_source, S_IRUGO, show_fan_source, NULL, 4);
+static SENSOR_DEVICE_ATTR(fan6_source, S_IRUGO, show_fan_source, NULL, 5);
+static SENSOR_DEVICE_ATTR(fan7_source, S_IRUGO, show_fan_source, NULL, 6);
+static SENSOR_DEVICE_ATTR(fan8_source, S_IRUGO, show_fan_source, NULL, 7);
+
+static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO | S_IWUSR,
+                        show_fan_alarm, clear_fan_alarm, 0);
+static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO | S_IWUSR,
+                        show_fan_alarm, clear_fan_alarm, 1);
+static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO | S_IWUSR,
+                        show_fan_alarm, clear_fan_alarm, 2);
+static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO | S_IWUSR,
+                        show_fan_alarm, clear_fan_alarm, 3);
+static SENSOR_DEVICE_ATTR(fan5_alarm, S_IRUGO | S_IWUSR,
+                        show_fan_alarm, clear_fan_alarm, 4);
+static SENSOR_DEVICE_ATTR(fan6_alarm, S_IRUGO | S_IWUSR,
+                        show_fan_alarm, clear_fan_alarm, 5);
+static SENSOR_DEVICE_ATTR(fan7_alarm, S_IRUGO | S_IWUSR,
+                        show_fan_alarm, clear_fan_alarm, 6);
+static SENSOR_DEVICE_ATTR(fan8_alarm, S_IRUGO | S_IWUSR,
+                        show_fan_alarm, clear_fan_alarm, 7);
+
+static struct attribute *fts_fan_attrs[] = {
+       &sensor_dev_attr_fan1_input.dev_attr.attr,
+       &sensor_dev_attr_fan2_input.dev_attr.attr,
+       &sensor_dev_attr_fan3_input.dev_attr.attr,
+       &sensor_dev_attr_fan4_input.dev_attr.attr,
+       &sensor_dev_attr_fan5_input.dev_attr.attr,
+       &sensor_dev_attr_fan6_input.dev_attr.attr,
+       &sensor_dev_attr_fan7_input.dev_attr.attr,
+       &sensor_dev_attr_fan8_input.dev_attr.attr,
+
+       &sensor_dev_attr_fan1_source.dev_attr.attr,
+       &sensor_dev_attr_fan2_source.dev_attr.attr,
+       &sensor_dev_attr_fan3_source.dev_attr.attr,
+       &sensor_dev_attr_fan4_source.dev_attr.attr,
+       &sensor_dev_attr_fan5_source.dev_attr.attr,
+       &sensor_dev_attr_fan6_source.dev_attr.attr,
+       &sensor_dev_attr_fan7_source.dev_attr.attr,
+       &sensor_dev_attr_fan8_source.dev_attr.attr,
+
+       &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+       &sensor_dev_attr_fan2_alarm.dev_attr.attr,
+       &sensor_dev_attr_fan3_alarm.dev_attr.attr,
+       &sensor_dev_attr_fan4_alarm.dev_attr.attr,
+       &sensor_dev_attr_fan5_alarm.dev_attr.attr,
+       &sensor_dev_attr_fan6_alarm.dev_attr.attr,
+       &sensor_dev_attr_fan7_alarm.dev_attr.attr,
+       &sensor_dev_attr_fan8_alarm.dev_attr.attr,
+       NULL
+};
+
+/* Voltages */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in_value, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in_value, NULL, 1);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in_value, NULL, 2);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in_value, NULL, 3);
+static struct attribute *fts_voltage_attrs[] = {
+       &sensor_dev_attr_in1_input.dev_attr.attr,
+       &sensor_dev_attr_in2_input.dev_attr.attr,
+       &sensor_dev_attr_in3_input.dev_attr.attr,
+       &sensor_dev_attr_in4_input.dev_attr.attr,
+       NULL
+};
+
+static const struct attribute_group fts_voltage_attr_group = {
+       .attrs = fts_voltage_attrs
+};
+
+static const struct attribute_group fts_temp_attr_group = {
+       .attrs = fts_temp_attrs
+};
+
+static const struct attribute_group fts_fan_attr_group = {
+       .attrs = fts_fan_attrs
+};
+
+static const struct attribute_group *fts_attr_groups[] = {
+       &fts_voltage_attr_group,
+       &fts_temp_attr_group,
+       &fts_fan_attr_group,
+       NULL
+};
+
+/*****************************************************************************/
+/* Module initialization / remove functions                                 */
+/*****************************************************************************/
+/*
+ * i2c remove callback: only the watchdog needs explicit teardown; the
+ * hwmon device and the data allocation are devm-managed.
+ */
+static int fts_remove(struct i2c_client *client)
+{
+       struct fts_data *data = dev_get_drvdata(&client->dev);
+
+       watchdog_unregister_device(&data->wdd);
+       return 0;
+}
+
+/*
+ * i2c probe callback: verify the expected BMC is present at address 0x73,
+ * allocate per-device state, register the hwmon sensor groups and then
+ * the watchdog part.  Returns 0 on success or a negative errno.
+ */
+static int fts_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+       u8 revision;
+       struct fts_data *data;
+       int err;
+       s8 deviceid;
+       struct device *hwmon_dev;
+
+       /* the supported BMC only ever sits at I2C address 0x73 */
+       if (client->addr != 0x73)
+               return -ENODEV;
+
+       /* Baseboard Management Controller check */
+       /* device id must read 0x1X; only variant X == 1 is supported */
+       deviceid = i2c_smbus_read_byte_data(client, FTS_DEVICE_ID_REG);
+       if (deviceid > 0 && (deviceid & 0xF0) == 0x10) {
+               switch (deviceid & 0x0F) {
+               case 0x01:
+                       break;
+               default:
+                       dev_dbg(&client->dev,
+                               "No Baseboard Management Controller\n");
+                       return -ENODEV;
+               }
+       } else {
+               dev_dbg(&client->dev, "No fujitsu board\n");
+               return -ENODEV;
+       }
+
+       data = devm_kzalloc(&client->dev, sizeof(struct fts_data),
+                           GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       mutex_init(&data->update_lock);
+       mutex_init(&data->access_lock);
+       data->client = client;
+       dev_set_drvdata(&client->dev, data);
+
+       err = i2c_smbus_read_byte_data(client, FTS_DEVICE_REVISION_REG);
+       if (err < 0)
+               return err;
+       revision = err;
+
+       hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
+                                                          "ftsteutates",
+                                                          data,
+                                                          fts_attr_groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
+
+       err = fts_watchdog_init(data);
+       if (err)
+               return err;
+
+       /* revision byte: high nibble = major, low nibble = minor */
+       dev_info(&client->dev, "Detected FTS Teutates chip, revision: %d.%d\n",
+                (revision & 0xF0) >> 4, revision & 0x0F);
+       return 0;
+}
+
+/*****************************************************************************/
+/* Module Details                                                           */
+/*****************************************************************************/
+static struct i2c_driver fts_driver = {
+       .driver = {
+               .name = "ftsteutates",
+       },
+       .id_table = fts_id,
+       .probe = fts_probe,
+       .remove = fts_remove,
+};
+
+module_i2c_driver(fts_driver);
+
+MODULE_AUTHOR("Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>");
+MODULE_DESCRIPTION("FTS Teutates driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
new file mode 100644 (file)
index 0000000..e6b4950
--- /dev/null
@@ -0,0 +1,445 @@
+/*
+ * INA3221 Triple Current/Voltage Monitor
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *     Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define INA3221_DRIVER_NAME            "ina3221"
+
+#define INA3221_CONFIG                 0x00
+#define INA3221_SHUNT1                 0x01
+#define INA3221_BUS1                   0x02
+#define INA3221_SHUNT2                 0x03
+#define INA3221_BUS2                   0x04
+#define INA3221_SHUNT3                 0x05
+#define INA3221_BUS3                   0x06
+#define INA3221_CRIT1                  0x07
+#define INA3221_WARN1                  0x08
+#define INA3221_CRIT2                  0x09
+#define INA3221_WARN2                  0x0a
+#define INA3221_CRIT3                  0x0b
+#define INA3221_WARN3                  0x0c
+#define INA3221_MASK_ENABLE            0x0f
+
+#define INA3221_CONFIG_MODE_SHUNT      BIT(1)
+#define INA3221_CONFIG_MODE_BUS                BIT(2)
+#define INA3221_CONFIG_MODE_CONTINUOUS BIT(3)
+
+#define INA3221_RSHUNT_DEFAULT         10000
+
+enum ina3221_fields {
+       /* Configuration */
+       F_RST,
+
+       /* Alert Flags */
+       F_WF3, F_WF2, F_WF1,
+       F_CF3, F_CF2, F_CF1,
+
+       /* sentinel */
+       F_MAX_FIELDS
+};
+
+static const struct reg_field ina3221_reg_fields[] = {
+       [F_RST] = REG_FIELD(INA3221_CONFIG, 15, 15),
+
+       [F_WF3] = REG_FIELD(INA3221_MASK_ENABLE, 3, 3),
+       [F_WF2] = REG_FIELD(INA3221_MASK_ENABLE, 4, 4),
+       [F_WF1] = REG_FIELD(INA3221_MASK_ENABLE, 5, 5),
+       [F_CF3] = REG_FIELD(INA3221_MASK_ENABLE, 7, 7),
+       [F_CF2] = REG_FIELD(INA3221_MASK_ENABLE, 8, 8),
+       [F_CF1] = REG_FIELD(INA3221_MASK_ENABLE, 9, 9),
+};
+
+enum ina3221_channels {
+       INA3221_CHANNEL1,
+       INA3221_CHANNEL2,
+       INA3221_CHANNEL3,
+       INA3221_NUM_CHANNELS
+};
+
+static const unsigned int register_channel[] = {
+       [INA3221_SHUNT1] = INA3221_CHANNEL1,
+       [INA3221_SHUNT2] = INA3221_CHANNEL2,
+       [INA3221_SHUNT3] = INA3221_CHANNEL3,
+       [INA3221_CRIT1] = INA3221_CHANNEL1,
+       [INA3221_CRIT2] = INA3221_CHANNEL2,
+       [INA3221_CRIT3] = INA3221_CHANNEL3,
+       [INA3221_WARN1] = INA3221_CHANNEL1,
+       [INA3221_WARN2] = INA3221_CHANNEL2,
+       [INA3221_WARN3] = INA3221_CHANNEL3,
+};
+
+/**
+ * struct ina3221_data - device specific information
+ * @regmap: Register map of the device
+ * @fields: Register fields of the device
+ * @shunt_resistors: Array of resistor values per channel
+ */
+struct ina3221_data {
+       struct regmap *regmap;
+       struct regmap_field *fields[F_MAX_FIELDS];
+       int shunt_resistors[INA3221_NUM_CHANNELS];
+};
+
+/*
+ * Read a measurement/limit register and convert it to a signed value:
+ * the payload occupies bits [15:3], so shift right by 3 and sign-extend
+ * from bit 12.  Returns 0 on success or the regmap error code.
+ */
+static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
+                             int *val)
+{
+       unsigned int regval;
+       int ret;
+
+       ret = regmap_read(ina->regmap, reg, &regval);
+       if (ret)
+               return ret;
+
+       *val = sign_extend32(regval >> 3, 12);
+
+       return 0;
+}
+
+/*
+ * sysfs show handler for inX_input (bus voltage): one register LSB (after
+ * the 3-bit shift in ina3221_read_value()) corresponds to 8 mV.
+ */
+static ssize_t ina3221_show_bus_voltage(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+       struct ina3221_data *ina = dev_get_drvdata(dev);
+       unsigned int reg = sd_attr->index;
+       int val, voltage_mv, ret;
+
+       ret = ina3221_read_value(ina, reg, &val);
+       if (ret)
+               return ret;
+
+       /* 8 mV per LSB */
+       voltage_mv = val * 8;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", voltage_mv);
+}
+
+/*
+ * sysfs show handler for shunt voltage (inX_input): one register LSB
+ * (after the 3-bit shift in ina3221_read_value()) corresponds to 40 uV.
+ */
+static ssize_t ina3221_show_shunt_voltage(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+       struct ina3221_data *ina = dev_get_drvdata(dev);
+       unsigned int reg = sd_attr->index;
+       int val, voltage_uv, ret;
+
+       ret = ina3221_read_value(ina, reg, &val);
+       if (ret)
+               return ret;
+       /* 40 uV per LSB */
+       voltage_uv = val * 40;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", voltage_uv);
+}
+
+/*
+ * sysfs show handler for currX_input / currX_crit / currX_max: compute
+ * current in mA from the shunt (or limit) voltage and the per-channel
+ * shunt resistance: I[mA] = V[nV] / R[uOhm].  register_channel[] maps
+ * the register index to its channel so the right resistor is used.
+ */
+static ssize_t ina3221_show_current(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+       struct ina3221_data *ina = dev_get_drvdata(dev);
+       unsigned int reg = sd_attr->index;
+       unsigned int channel = register_channel[reg];
+       int resistance_uo = ina->shunt_resistors[channel];
+       int val, current_ma, voltage_nv, ret;
+
+       ret = ina3221_read_value(ina, reg, &val);
+       if (ret)
+               return ret;
+       /* 40 uV per LSB == 40000 nV per LSB */
+       voltage_nv = val * 40000;
+
+       current_ma = DIV_ROUND_CLOSEST(voltage_nv, resistance_uo);
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", current_ma);
+}
+
+/*
+ * sysfs store handler for currX_crit / currX_max: convert the requested
+ * current limit (mA) back into a shunt-voltage limit register value.
+ * Returns @count on success or a negative errno.
+ */
+static ssize_t ina3221_set_current(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
+{
+       struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+       struct ina3221_data *ina = dev_get_drvdata(dev);
+       unsigned int reg = sd_attr->index;
+       unsigned int channel = register_channel[reg];
+       int resistance_uo = ina->shunt_resistors[channel];
+       int val, current_ma, voltage_uv, ret;
+
+       ret = kstrtoint(buf, 0, &current_ma);
+       if (ret)
+               return ret;
+
+       /* clamp current */
+       /* bound so that current_ma * resistance_uo cannot overflow int */
+       current_ma = clamp_val(current_ma,
+                              INT_MIN / resistance_uo,
+                              INT_MAX / resistance_uo);
+
+       voltage_uv = DIV_ROUND_CLOSEST(current_ma * resistance_uo, 1000);
+
+       /* clamp voltage */
+       /* +/-163800 uV is the register's full-scale range (4095 * 40 uV) */
+       voltage_uv = clamp_val(voltage_uv, -163800, 163800);
+
+       /* 1 / 40uV(scale) << 3(register shift) = 5 */
+       /* mask keeps the payload in bits [15:3]; bits [2:0] stay zero */
+       val = DIV_ROUND_CLOSEST(voltage_uv, 5) & 0xfff8;
+
+       ret = regmap_write(ina->regmap, reg, val);
+       if (ret)
+               return ret;
+
+       return count;
+}
+
+/*
+ * sysfs show handler for shuntX_resistor: print the configured shunt
+ * resistance for this channel in micro-ohms.
+ */
+static ssize_t ina3221_show_shunt(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+       struct ina3221_data *ina = dev_get_drvdata(dev);
+       unsigned int channel = sd_attr->index;
+       unsigned int resistance_uo;
+
+       resistance_uo = ina->shunt_resistors[channel];
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", resistance_uo);
+}
+
+/*
+ * sysfs store handler for shuntX_resistor: set the shunt resistance
+ * (micro-ohms) used for current conversion on this channel.  The value
+ * is clamped to at least 1 so later divisions can never divide by zero.
+ */
+static ssize_t ina3221_set_shunt(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+       struct ina3221_data *ina = dev_get_drvdata(dev);
+       unsigned int channel = sd_attr->index;
+       int val;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &val);
+       if (ret)
+               return ret;
+
+       /* never allow 0: resistance is used as a divisor */
+       val = clamp_val(val, 1, INT_MAX);
+
+       ina->shunt_resistors[channel] = val;
+
+       return count;
+}
+
+/*
+ * sysfs show handler for currX_crit_alarm / currX_max_alarm: read the
+ * single-bit warning/critical flag from the MASK/ENABLE register via the
+ * regmap field selected by the attribute index and print it as 0/1.
+ */
+static ssize_t ina3221_show_alert(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct sensor_device_attribute *sd_attr = to_sensor_dev_attr(attr);
+       struct ina3221_data *ina = dev_get_drvdata(dev);
+       unsigned int field = sd_attr->index;
+       unsigned int regval;
+       int ret;
+
+       ret = regmap_field_read(ina->fields[field], &regval);
+       if (ret)
+               return ret;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", regval);
+}
+
+/* bus voltage */
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO,
+               ina3221_show_bus_voltage, NULL, INA3221_BUS1);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO,
+               ina3221_show_bus_voltage, NULL, INA3221_BUS2);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO,
+               ina3221_show_bus_voltage, NULL, INA3221_BUS3);
+
+/* calculated current */
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO,
+               ina3221_show_current, NULL, INA3221_SHUNT1);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO,
+               ina3221_show_current, NULL, INA3221_SHUNT2);
+static SENSOR_DEVICE_ATTR(curr3_input, S_IRUGO,
+               ina3221_show_current, NULL, INA3221_SHUNT3);
+
+/* shunt resistance */
+static SENSOR_DEVICE_ATTR(shunt1_resistor, S_IRUGO | S_IWUSR,
+               ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL1);
+static SENSOR_DEVICE_ATTR(shunt2_resistor, S_IRUGO | S_IWUSR,
+               ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL2);
+static SENSOR_DEVICE_ATTR(shunt3_resistor, S_IRUGO | S_IWUSR,
+               ina3221_show_shunt, ina3221_set_shunt, INA3221_CHANNEL3);
+
+/* critical current */
+static SENSOR_DEVICE_ATTR(curr1_crit, S_IRUGO | S_IWUSR,
+               ina3221_show_current, ina3221_set_current, INA3221_CRIT1);
+static SENSOR_DEVICE_ATTR(curr2_crit, S_IRUGO | S_IWUSR,
+               ina3221_show_current, ina3221_set_current, INA3221_CRIT2);
+static SENSOR_DEVICE_ATTR(curr3_crit, S_IRUGO | S_IWUSR,
+               ina3221_show_current, ina3221_set_current, INA3221_CRIT3);
+
+/* critical current alert */
+static SENSOR_DEVICE_ATTR(curr1_crit_alarm, S_IRUGO,
+               ina3221_show_alert, NULL, F_CF1);
+static SENSOR_DEVICE_ATTR(curr2_crit_alarm, S_IRUGO,
+               ina3221_show_alert, NULL, F_CF2);
+static SENSOR_DEVICE_ATTR(curr3_crit_alarm, S_IRUGO,
+               ina3221_show_alert, NULL, F_CF3);
+
+/* warning current */
+static SENSOR_DEVICE_ATTR(curr1_max, S_IRUGO | S_IWUSR,
+               ina3221_show_current, ina3221_set_current, INA3221_WARN1);
+static SENSOR_DEVICE_ATTR(curr2_max, S_IRUGO | S_IWUSR,
+               ina3221_show_current, ina3221_set_current, INA3221_WARN2);
+static SENSOR_DEVICE_ATTR(curr3_max, S_IRUGO | S_IWUSR,
+               ina3221_show_current, ina3221_set_current, INA3221_WARN3);
+
+/* warning current alert */
+static SENSOR_DEVICE_ATTR(curr1_max_alarm, S_IRUGO,
+               ina3221_show_alert, NULL, F_WF1);
+static SENSOR_DEVICE_ATTR(curr2_max_alarm, S_IRUGO,
+               ina3221_show_alert, NULL, F_WF2);
+static SENSOR_DEVICE_ATTR(curr3_max_alarm, S_IRUGO,
+               ina3221_show_alert, NULL, F_WF3);
+
+/* shunt voltage */
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO,
+               ina3221_show_shunt_voltage, NULL, INA3221_SHUNT1);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO,
+               ina3221_show_shunt_voltage, NULL, INA3221_SHUNT2);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO,
+               ina3221_show_shunt_voltage, NULL, INA3221_SHUNT3);
+
+static struct attribute *ina3221_attrs[] = {
+       /* channel 1 */
+       &sensor_dev_attr_in1_input.dev_attr.attr,
+       &sensor_dev_attr_curr1_input.dev_attr.attr,
+       &sensor_dev_attr_shunt1_resistor.dev_attr.attr,
+       &sensor_dev_attr_curr1_crit.dev_attr.attr,
+       &sensor_dev_attr_curr1_crit_alarm.dev_attr.attr,
+       &sensor_dev_attr_curr1_max.dev_attr.attr,
+       &sensor_dev_attr_curr1_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_in4_input.dev_attr.attr,
+
+       /* channel 2 */
+       &sensor_dev_attr_in2_input.dev_attr.attr,
+       &sensor_dev_attr_curr2_input.dev_attr.attr,
+       &sensor_dev_attr_shunt2_resistor.dev_attr.attr,
+       &sensor_dev_attr_curr2_crit.dev_attr.attr,
+       &sensor_dev_attr_curr2_crit_alarm.dev_attr.attr,
+       &sensor_dev_attr_curr2_max.dev_attr.attr,
+       &sensor_dev_attr_curr2_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_in5_input.dev_attr.attr,
+
+       /* channel 3 */
+       &sensor_dev_attr_in3_input.dev_attr.attr,
+       &sensor_dev_attr_curr3_input.dev_attr.attr,
+       &sensor_dev_attr_shunt3_resistor.dev_attr.attr,
+       &sensor_dev_attr_curr3_crit.dev_attr.attr,
+       &sensor_dev_attr_curr3_crit_alarm.dev_attr.attr,
+       &sensor_dev_attr_curr3_max.dev_attr.attr,
+       &sensor_dev_attr_curr3_max_alarm.dev_attr.attr,
+       &sensor_dev_attr_in6_input.dev_attr.attr,
+
+       NULL,
+};
+ATTRIBUTE_GROUPS(ina3221);
+
+static const struct regmap_range ina3221_yes_ranges[] = {
+       regmap_reg_range(INA3221_SHUNT1, INA3221_BUS3),
+       regmap_reg_range(INA3221_MASK_ENABLE, INA3221_MASK_ENABLE),
+};
+
+static const struct regmap_access_table ina3221_volatile_table = {
+       .yes_ranges = ina3221_yes_ranges,
+       .n_yes_ranges = ARRAY_SIZE(ina3221_yes_ranges),
+};
+
+static const struct regmap_config ina3221_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+
+       .cache_type = REGCACHE_RBTREE,
+       .volatile_table = &ina3221_volatile_table,
+};
+
+/*
+ * i2c probe callback: set up the regmap and its bit fields, initialise
+ * the per-channel shunt resistances to the 10 mOhm default, soft-reset
+ * the chip via the RST configuration bit, and register the hwmon device.
+ * All resources are devm-managed, so no remove callback is needed.
+ */
+static int ina3221_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct ina3221_data *ina;
+       struct device *hwmon_dev;
+       int i, ret;
+
+       ina = devm_kzalloc(dev, sizeof(*ina), GFP_KERNEL);
+       if (!ina)
+               return -ENOMEM;
+
+       ina->regmap = devm_regmap_init_i2c(client, &ina3221_regmap_config);
+       if (IS_ERR(ina->regmap)) {
+               dev_err(dev, "Unable to allocate register map\n");
+               return PTR_ERR(ina->regmap);
+       }
+
+       for (i = 0; i < F_MAX_FIELDS; i++) {
+               ina->fields[i] = devm_regmap_field_alloc(dev,
+                                                        ina->regmap,
+                                                        ina3221_reg_fields[i]);
+               if (IS_ERR(ina->fields[i])) {
+                       dev_err(dev, "Unable to allocate regmap fields\n");
+                       return PTR_ERR(ina->fields[i]);
+               }
+       }
+
+       for (i = 0; i < INA3221_NUM_CHANNELS; i++)
+               ina->shunt_resistors[i] = INA3221_RSHUNT_DEFAULT;
+
+       /* software reset: restores the chip's power-on defaults */
+       ret = regmap_field_write(ina->fields[F_RST], true);
+       if (ret) {
+               dev_err(dev, "Unable to reset device\n");
+               return ret;
+       }
+
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+                                                          client->name,
+                                                          ina, ina3221_groups);
+       if (IS_ERR(hwmon_dev)) {
+               dev_err(dev, "Unable to register hwmon device\n");
+               return PTR_ERR(hwmon_dev);
+       }
+
+       return 0;
+}
+
+static const struct of_device_id ina3221_of_match_table[] = {
+       { .compatible = "ti,ina3221", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ina3221_of_match_table);
+
+static const struct i2c_device_id ina3221_ids[] = {
+       { "ina3221", 0 },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, ina3221_ids);
+
+static struct i2c_driver ina3221_i2c_driver = {
+       .probe = ina3221_probe,
+       .driver = {
+               .name = INA3221_DRIVER_NAME,
+               .of_match_table = ina3221_of_match_table,
+       },
+       .id_table = ina3221_ids,
+};
+module_i2c_driver(ina3221_i2c_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("Texas Instruments INA3221 HWMon Driver");
+MODULE_LICENSE("GPL v2");
index 9887d3224a865e9aa0de33e8d38894600cc0be3b..9d5f85f3384fae0f0dd44e1d34035780dcd858b4 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
 #include <linux/mutex.h>
+#include <linux/of.h>
 
 /* Addresses to scan */
 static const unsigned short normal_i2c[] = {
@@ -104,6 +105,9 @@ static const unsigned short normal_i2c[] = {
 #define MCP9804_DEVID          0x0200
 #define MCP9804_DEVID_MASK     0xfffc
 
+#define MCP9808_DEVID          0x0400
+#define MCP9808_DEVID_MASK     0xfffc
+
 #define MCP98242_DEVID         0x2000
 #define MCP98242_DEVID_MASK    0xfffc
 
@@ -160,6 +164,7 @@ static struct jc42_chips jc42_chips[] = {
        { IDT_MANID, TS3001_DEVID, TS3001_DEVID_MASK },
        { MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK },
        { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
+       { MCP_MANID, MCP9808_DEVID, MCP9808_DEVID_MASK },
        { MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
        { MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
        { MCP_MANID, MCP98244_DEVID, MCP98244_DEVID_MASK },
@@ -537,11 +542,20 @@ static const struct i2c_device_id jc42_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, jc42_id);
 
+#ifdef CONFIG_OF
+static const struct of_device_id jc42_of_ids[] = {
+       { .compatible = "jedec,jc-42.4-temp", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, jc42_of_ids);
+#endif
+
 static struct i2c_driver jc42_driver = {
-       .class          = I2C_CLASS_SPD,
+       .class          = I2C_CLASS_SPD | I2C_CLASS_HWMON,
        .driver = {
                .name   = "jc42",
                .pm = JC42_DEV_PM_OPS,
+               .of_match_table = of_match_ptr(jc42_of_ids),
        },
        .probe          = jc42_probe,
        .remove         = jc42_remove,
index df9b3447f2a8dbee2da136e3b01d86157389d486..0621ee1b3c98f1ebb9a86fb316fb8d0690a6e9e1 100644 (file)
 
 struct jz4740_hwmon {
        void __iomem *base;
-
        int irq;
-
        const struct mfd_cell *cell;
-       struct device *hwmon;
-
+       struct platform_device *pdev;
        struct completion read_completion;
-
        struct mutex lock;
 };
 
-static ssize_t jz4740_hwmon_show_name(struct device *dev,
-       struct device_attribute *dev_attr, char *buf)
-{
-       return sprintf(buf, "jz4740\n");
-}
-
 static irqreturn_t jz4740_hwmon_irq(int irq, void *data)
 {
        struct jz4740_hwmon *hwmon = data;
@@ -58,6 +48,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
        struct device_attribute *dev_attr, char *buf)
 {
        struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
+       struct platform_device *pdev = hwmon->pdev;
        struct completion *completion = &hwmon->read_completion;
        long t;
        unsigned long val;
@@ -68,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
        reinit_completion(completion);
 
        enable_irq(hwmon->irq);
-       hwmon->cell->enable(to_platform_device(dev));
+       hwmon->cell->enable(pdev);
 
        t = wait_for_completion_interruptible_timeout(completion, HZ);
 
@@ -80,7 +71,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
                ret = t ? t : -ETIMEDOUT;
        }
 
-       hwmon->cell->disable(to_platform_device(dev));
+       hwmon->cell->disable(pdev);
        disable_irq(hwmon->irq);
 
        mutex_unlock(&hwmon->lock);
@@ -88,26 +79,24 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
        return ret;
 }
 
-static DEVICE_ATTR(name, S_IRUGO, jz4740_hwmon_show_name, NULL);
 static DEVICE_ATTR(in0_input, S_IRUGO, jz4740_hwmon_read_adcin, NULL);
 
-static struct attribute *jz4740_hwmon_attributes[] = {
-       &dev_attr_name.attr,
+static struct attribute *jz4740_attrs[] = {
        &dev_attr_in0_input.attr,
        NULL
 };
 
-static const struct attribute_group jz4740_hwmon_attr_group = {
-       .attrs = jz4740_hwmon_attributes,
-};
+ATTRIBUTE_GROUPS(jz4740);
 
 static int jz4740_hwmon_probe(struct platform_device *pdev)
 {
        int ret;
+       struct device *dev = &pdev->dev;
        struct jz4740_hwmon *hwmon;
+       struct device *hwmon_dev;
        struct resource *mem;
 
-       hwmon = devm_kzalloc(&pdev->dev, sizeof(*hwmon), GFP_KERNEL);
+       hwmon = devm_kzalloc(dev, sizeof(*hwmon), GFP_KERNEL);
        if (!hwmon)
                return -ENOMEM;
 
@@ -125,12 +114,11 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
        if (IS_ERR(hwmon->base))
                return PTR_ERR(hwmon->base);
 
+       hwmon->pdev = pdev;
        init_completion(&hwmon->read_completion);
        mutex_init(&hwmon->lock);
 
-       platform_set_drvdata(pdev, hwmon);
-
-       ret = devm_request_irq(&pdev->dev, hwmon->irq, jz4740_hwmon_irq, 0,
+       ret = devm_request_irq(dev, hwmon->irq, jz4740_hwmon_irq, 0,
                               pdev->name, hwmon);
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
@@ -138,38 +126,13 @@ static int jz4740_hwmon_probe(struct platform_device *pdev)
        }
        disable_irq(hwmon->irq);
 
-       ret = sysfs_create_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to create sysfs group: %d\n", ret);
-               return ret;
-       }
-
-       hwmon->hwmon = hwmon_device_register(&pdev->dev);
-       if (IS_ERR(hwmon->hwmon)) {
-               ret = PTR_ERR(hwmon->hwmon);
-               goto err_remove_file;
-       }
-
-       return 0;
-
-err_remove_file:
-       sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
-       return ret;
-}
-
-static int jz4740_hwmon_remove(struct platform_device *pdev)
-{
-       struct jz4740_hwmon *hwmon = platform_get_drvdata(pdev);
-
-       hwmon_device_unregister(hwmon->hwmon);
-       sysfs_remove_group(&pdev->dev.kobj, &jz4740_hwmon_attr_group);
-
-       return 0;
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, "jz4740", hwmon,
+                                                          jz4740_groups);
+       return PTR_ERR_OR_ZERO(hwmon_dev);
 }
 
 static struct platform_driver jz4740_hwmon_driver = {
        .probe  = jz4740_hwmon_probe,
-       .remove = jz4740_hwmon_remove,
        .driver = {
                .name = "jz4740-hwmon",
        },
index 69166ab3151d52db66421ad353082fd57cf4cf3d..547a9c87c68c279019a1cdc2a5f07e737974ac51 100644 (file)
@@ -26,8 +26,8 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/err.h>
-#include <linux/mutex.h>
 #include <linux/of.h>
+#include <linux/regmap.h>
 #include <linux/thermal.h>
 #include "lm75.h"
 
@@ -66,35 +66,21 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b, 0x4c,
 
 
 /* The LM75 registers */
+#define LM75_REG_TEMP          0x00
 #define LM75_REG_CONF          0x01
-static const u8 LM75_REG_TEMP[3] = {
-       0x00,           /* input */
-       0x03,           /* max */
-       0x02,           /* hyst */
-};
+#define LM75_REG_HYST          0x02
+#define LM75_REG_MAX           0x03
 
 /* Each client has this additional data */
 struct lm75_data {
        struct i2c_client       *client;
-       struct device           *hwmon_dev;
-       struct mutex            update_lock;
+       struct regmap           *regmap;
        u8                      orig_conf;
        u8                      resolution;     /* In bits, between 9 and 12 */
        u8                      resolution_limits;
-       char                    valid;          /* !=0 if registers are valid */
-       unsigned long           last_updated;   /* In jiffies */
-       unsigned long           sample_time;    /* In jiffies */
-       s16                     temp[3];        /* Register values,
-                                                  0 = input
-                                                  1 = max
-                                                  2 = hyst */
+       unsigned int            sample_time;    /* In ms */
 };
 
-static int lm75_read_value(struct i2c_client *client, u8 reg);
-static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value);
-static struct lm75_data *lm75_update_device(struct device *dev);
-
-
 /*-----------------------------------------------------------------------*/
 
 static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
@@ -106,12 +92,15 @@ static inline long lm75_reg_to_mc(s16 temp, u8 resolution)
 
 static int lm75_read_temp(void *dev, int *temp)
 {
-       struct lm75_data *data = lm75_update_device(dev);
+       struct lm75_data *data = dev_get_drvdata(dev);
+       unsigned int _temp;
+       int err;
 
-       if (IS_ERR(data))
-               return PTR_ERR(data);
+       err = regmap_read(data->regmap, LM75_REG_TEMP, &_temp);
+       if (err < 0)
+               return err;
 
-       *temp = lm75_reg_to_mc(data->temp[0], data->resolution);
+       *temp = lm75_reg_to_mc(_temp, data->resolution);
 
        return 0;
 }
@@ -120,13 +109,15 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
                         char *buf)
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
-       struct lm75_data *data = lm75_update_device(dev);
+       struct lm75_data *data = dev_get_drvdata(dev);
+       unsigned int temp = 0;
+       int err;
 
-       if (IS_ERR(data))
-               return PTR_ERR(data);
+       err = regmap_read(data->regmap, attr->index, &temp);
+       if (err < 0)
+               return err;
 
-       return sprintf(buf, "%ld\n", lm75_reg_to_mc(data->temp[attr->index],
-                                                   data->resolution));
+       return sprintf(buf, "%ld\n", lm75_reg_to_mc(temp, data->resolution));
 }
 
 static ssize_t set_temp(struct device *dev, struct device_attribute *da,
@@ -134,8 +125,6 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
        struct lm75_data *data = dev_get_drvdata(dev);
-       struct i2c_client *client = data->client;
-       int nr = attr->index;
        long temp;
        int error;
        u8 resolution;
@@ -153,25 +142,36 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
        else
                resolution = data->resolution;
 
-       mutex_lock(&data->update_lock);
        temp = clamp_val(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
-       data->temp[nr] = DIV_ROUND_CLOSEST(temp  << (resolution - 8),
-                                          1000) << (16 - resolution);
-       lm75_write_value(client, LM75_REG_TEMP[nr], data->temp[nr]);
-       mutex_unlock(&data->update_lock);
+       temp = DIV_ROUND_CLOSEST(temp  << (resolution - 8),
+                                1000) << (16 - resolution);
+       error = regmap_write(data->regmap, attr->index, temp);
+       if (error < 0)
+               return error;
+
        return count;
 }
 
+static ssize_t show_update_interval(struct device *dev,
+                                   struct device_attribute *da, char *buf)
+{
+       struct lm75_data *data = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%u\n", data->sample_time);
+}
+
 static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
-                       show_temp, set_temp, 1);
+                       show_temp, set_temp, LM75_REG_MAX);
 static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO,
-                       show_temp, set_temp, 2);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+                       show_temp, set_temp, LM75_REG_HYST);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, LM75_REG_TEMP);
+static DEVICE_ATTR(update_interval, S_IRUGO, show_update_interval, NULL);
 
 static struct attribute *lm75_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+       &dev_attr_update_interval.attr,
 
        NULL
 };
@@ -185,10 +185,40 @@ static const struct thermal_zone_of_device_ops lm75_of_thermal_ops = {
 
 /* device probe and removal */
 
+static bool lm75_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+       return reg != LM75_REG_TEMP;
+}
+
+static bool lm75_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+       return reg == LM75_REG_TEMP;
+}
+
+static const struct regmap_config lm75_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+       .max_register = LM75_REG_MAX,
+       .writeable_reg = lm75_is_writeable_reg,
+       .volatile_reg = lm75_is_volatile_reg,
+       .val_format_endian = REGMAP_ENDIAN_BIG,
+       .cache_type = REGCACHE_RBTREE,
+       .use_single_rw = true,
+};
+
+static void lm75_remove(void *data)
+{
+       struct lm75_data *lm75 = data;
+       struct i2c_client *client = lm75->client;
+
+       i2c_smbus_write_byte_data(client, LM75_REG_CONF, lm75->orig_conf);
+}
+
 static int
 lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
        struct device *dev = &client->dev;
+       struct device *hwmon_dev;
        struct lm75_data *data;
        int status;
        u8 set_mask, clr_mask;
@@ -204,8 +234,10 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
                return -ENOMEM;
 
        data->client = client;
-       i2c_set_clientdata(client, data);
-       mutex_init(&data->update_lock);
+
+       data->regmap = devm_regmap_init_i2c(client, &lm75_regmap_config);
+       if (IS_ERR(data->regmap))
+               return PTR_ERR(data->regmap);
 
        /* Set to LM75 resolution (9 bits, 1/2 degree C) and range.
         * Then tweak to be more precise when appropriate.
@@ -217,7 +249,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
        case adt75:
                clr_mask |= 1 << 5;             /* not one-shot mode */
                data->resolution = 12;
-               data->sample_time = HZ / 8;
+               data->sample_time = MSEC_PER_SEC / 8;
                break;
        case ds1775:
        case ds75:
@@ -225,35 +257,35 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
                clr_mask |= 3 << 5;
                set_mask |= 2 << 5;             /* 11-bit mode */
                data->resolution = 11;
-               data->sample_time = HZ;
+               data->sample_time = MSEC_PER_SEC;
                break;
        case ds7505:
                set_mask |= 3 << 5;             /* 12-bit mode */
                data->resolution = 12;
-               data->sample_time = HZ / 4;
+               data->sample_time = MSEC_PER_SEC / 4;
                break;
        case g751:
        case lm75:
        case lm75a:
                data->resolution = 9;
-               data->sample_time = HZ / 2;
+               data->sample_time = MSEC_PER_SEC / 2;
                break;
        case lm75b:
                data->resolution = 11;
-               data->sample_time = HZ / 4;
+               data->sample_time = MSEC_PER_SEC / 4;
                break;
        case max6625:
                data->resolution = 9;
-               data->sample_time = HZ / 4;
+               data->sample_time = MSEC_PER_SEC / 4;
                break;
        case max6626:
                data->resolution = 12;
                data->resolution_limits = 9;
-               data->sample_time = HZ / 4;
+               data->sample_time = MSEC_PER_SEC / 4;
                break;
        case tcn75:
                data->resolution = 9;
-               data->sample_time = HZ / 8;
+               data->sample_time = MSEC_PER_SEC / 8;
                break;
        case mcp980x:
                data->resolution_limits = 9;
@@ -262,14 +294,14 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
        case tmp101:
                set_mask |= 3 << 5;             /* 12-bit mode */
                data->resolution = 12;
-               data->sample_time = HZ;
+               data->sample_time = MSEC_PER_SEC;
                clr_mask |= 1 << 7;             /* not one-shot mode */
                break;
        case tmp112:
                set_mask |= 3 << 5;             /* 12-bit mode */
                clr_mask |= 1 << 7;             /* not one-shot mode */
                data->resolution = 12;
-               data->sample_time = HZ / 4;
+               data->sample_time = MSEC_PER_SEC / 4;
                break;
        case tmp105:
        case tmp175:
@@ -278,17 +310,17 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
                set_mask |= 3 << 5;             /* 12-bit mode */
                clr_mask |= 1 << 7;             /* not one-shot mode */
                data->resolution = 12;
-               data->sample_time = HZ / 2;
+               data->sample_time = MSEC_PER_SEC / 2;
                break;
        case tmp75c:
                clr_mask |= 1 << 5;             /* not one-shot mode */
                data->resolution = 12;
-               data->sample_time = HZ / 4;
+               data->sample_time = MSEC_PER_SEC / 4;
                break;
        }
 
        /* configure as specified */
-       status = lm75_read_value(client, LM75_REG_CONF);
+       status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
        if (status < 0) {
                dev_dbg(dev, "Can't read config? %d\n", status);
                return status;
@@ -297,30 +329,23 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
        new = status & ~clr_mask;
        new |= set_mask;
        if (status != new)
-               lm75_write_value(client, LM75_REG_CONF, new);
-       dev_dbg(dev, "Config %02x\n", new);
+               i2c_smbus_write_byte_data(client, LM75_REG_CONF, new);
 
-       data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
-                                                           data, lm75_groups);
-       if (IS_ERR(data->hwmon_dev))
-               return PTR_ERR(data->hwmon_dev);
+       devm_add_action(dev, lm75_remove, data);
 
-       devm_thermal_zone_of_sensor_register(data->hwmon_dev, 0,
-                                            data->hwmon_dev,
-                                            &lm75_of_thermal_ops);
+       dev_dbg(dev, "Config %02x\n", new);
 
-       dev_info(dev, "%s: sensor '%s'\n",
-                dev_name(data->hwmon_dev), client->name);
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data, lm75_groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
-       return 0;
-}
+       devm_thermal_zone_of_sensor_register(hwmon_dev, 0,
+                                            hwmon_dev,
+                                            &lm75_of_thermal_ops);
 
-static int lm75_remove(struct i2c_client *client)
-{
-       struct lm75_data *data = i2c_get_clientdata(client);
+       dev_info(dev, "%s: sensor '%s'\n", dev_name(hwmon_dev), client->name);
 
-       hwmon_device_unregister(data->hwmon_dev);
-       lm75_write_value(client, LM75_REG_CONF, data->orig_conf);
        return 0;
 }
 
@@ -449,13 +474,13 @@ static int lm75_suspend(struct device *dev)
 {
        int status;
        struct i2c_client *client = to_i2c_client(dev);
-       status = lm75_read_value(client, LM75_REG_CONF);
+       status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
        if (status < 0) {
                dev_dbg(&client->dev, "Can't read config? %d\n", status);
                return status;
        }
        status = status | LM75_SHUTDOWN;
-       lm75_write_value(client, LM75_REG_CONF, status);
+       i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
        return 0;
 }
 
@@ -463,13 +488,13 @@ static int lm75_resume(struct device *dev)
 {
        int status;
        struct i2c_client *client = to_i2c_client(dev);
-       status = lm75_read_value(client, LM75_REG_CONF);
+       status = i2c_smbus_read_byte_data(client, LM75_REG_CONF);
        if (status < 0) {
                dev_dbg(&client->dev, "Can't read config? %d\n", status);
                return status;
        }
        status = status & ~LM75_SHUTDOWN;
-       lm75_write_value(client, LM75_REG_CONF, status);
+       i2c_smbus_write_byte_data(client, LM75_REG_CONF, status);
        return 0;
 }
 
@@ -489,73 +514,11 @@ static struct i2c_driver lm75_driver = {
                .pm     = LM75_DEV_PM_OPS,
        },
        .probe          = lm75_probe,
-       .remove         = lm75_remove,
        .id_table       = lm75_ids,
        .detect         = lm75_detect,
        .address_list   = normal_i2c,
 };
 
-/*-----------------------------------------------------------------------*/
-
-/* register access */
-
-/*
- * All registers are word-sized, except for the configuration register.
- * LM75 uses a high-byte first convention, which is exactly opposite to
- * the SMBus standard.
- */
-static int lm75_read_value(struct i2c_client *client, u8 reg)
-{
-       if (reg == LM75_REG_CONF)
-               return i2c_smbus_read_byte_data(client, reg);
-       else
-               return i2c_smbus_read_word_swapped(client, reg);
-}
-
-static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)
-{
-       if (reg == LM75_REG_CONF)
-               return i2c_smbus_write_byte_data(client, reg, value);
-       else
-               return i2c_smbus_write_word_swapped(client, reg, value);
-}
-
-static struct lm75_data *lm75_update_device(struct device *dev)
-{
-       struct lm75_data *data = dev_get_drvdata(dev);
-       struct i2c_client *client = data->client;
-       struct lm75_data *ret = data;
-
-       mutex_lock(&data->update_lock);
-
-       if (time_after(jiffies, data->last_updated + data->sample_time)
-           || !data->valid) {
-               int i;
-               dev_dbg(&client->dev, "Starting lm75 update\n");
-
-               for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
-                       int status;
-
-                       status = lm75_read_value(client, LM75_REG_TEMP[i]);
-                       if (unlikely(status < 0)) {
-                               dev_dbg(dev,
-                                       "LM75: Failed to read value: reg %d, error %d\n",
-                                       LM75_REG_TEMP[i], status);
-                               ret = ERR_PTR(status);
-                               data->valid = 0;
-                               goto abort;
-                       }
-                       data->temp[i] = status;
-               }
-               data->last_updated = jiffies;
-               data->valid = 1;
-       }
-
-abort:
-       mutex_unlock(&data->update_lock);
-       return ret;
-}
-
 module_i2c_driver(lm75_driver);
 
 MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>");
index e30a5939dc0d5566ae76ef31e790a578d8b0877c..f51e758ba5298c1d05f5c5301ea57dc95204b85c 100644 (file)
@@ -171,7 +171,6 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
 
 #define SA56004_REG_R_LOCAL_TEMPL 0x22
 
-#define LM90_DEF_CONVRATE_RVAL 6       /* Def conversion rate register value */
 #define LM90_MAX_CONVRATE_MS   16000   /* Maximum conversion rate in ms */
 
 /* TMP451 registers */
@@ -366,11 +365,9 @@ enum lm90_temp11_reg_index {
 
 struct lm90_data {
        struct i2c_client *client;
-       struct device *hwmon_dev;
        const struct attribute_group *groups[6];
        struct mutex update_lock;
-       struct regulator *regulator;
-       char valid; /* zero until following fields are valid */
+       bool valid;             /* true if register values are valid */
        unsigned long last_updated; /* in jiffies */
        int kind;
        u32 flags;
@@ -412,7 +409,7 @@ static inline s32 adm1032_write_byte(struct i2c_client *client, u8 value)
  * because we don't want the address pointer to change between the write
  * byte and the read byte transactions.
  */
-static int lm90_read_reg(struct i2c_client *client, u8 reg, u8 *value)
+static int lm90_read_reg(struct i2c_client *client, u8 reg)
 {
        int err;
 
@@ -423,20 +420,12 @@ static int lm90_read_reg(struct i2c_client *client, u8 reg, u8 *value)
        } else
                err = i2c_smbus_read_byte_data(client, reg);
 
-       if (err < 0) {
-               dev_warn(&client->dev, "Register %#02x read failed (%d)\n",
-                        reg, err);
-               return err;
-       }
-       *value = err;
-
-       return 0;
+       return err;
 }
 
-static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
+static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl)
 {
-       int err;
-       u8 oldh, newh, l;
+       int oldh, newh, l;
 
        /*
         * There is a trick here. We have to read two registers to have the
@@ -451,18 +440,21 @@ static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
         * we have to read the low byte again, and now we believe we have a
         * correct reading.
         */
-       if ((err = lm90_read_reg(client, regh, &oldh))
-        || (err = lm90_read_reg(client, regl, &l))
-        || (err = lm90_read_reg(client, regh, &newh)))
-               return err;
+       oldh = lm90_read_reg(client, regh);
+       if (oldh < 0)
+               return oldh;
+       l = lm90_read_reg(client, regl);
+       if (l < 0)
+               return l;
+       newh = lm90_read_reg(client, regh);
+       if (newh < 0)
+               return newh;
        if (oldh != newh) {
-               err = lm90_read_reg(client, regl, &l);
-               if (err)
-                       return err;
+               l = lm90_read_reg(client, regl);
+               if (l < 0)
+                       return l;
        }
-       *value = (newh << 8) | l;
-
-       return 0;
+       return (newh << 8) | l;
 }
 
 /*
@@ -473,20 +465,23 @@ static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
  * various registers have different meanings as a result of selecting a
  * non-default remote channel.
  */
-static inline void lm90_select_remote_channel(struct i2c_client *client,
-                                             struct lm90_data *data,
-                                             int channel)
+static inline int lm90_select_remote_channel(struct i2c_client *client,
+                                            struct lm90_data *data,
+                                            int channel)
 {
-       u8 config;
+       int config;
 
        if (data->kind == max6696) {
-               lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
+               config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+               if (config < 0)
+                       return config;
                config &= ~0x08;
                if (channel)
                        config |= 0x08;
                i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
                                          config);
        }
+       return 0;
 }
 
 /*
@@ -513,118 +508,204 @@ static void lm90_set_convrate(struct i2c_client *client, struct lm90_data *data,
        data->update_interval = DIV_ROUND_CLOSEST(update_interval, 64);
 }
 
+static int lm90_update_limits(struct device *dev)
+{
+       struct lm90_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       int val;
+
+       val = lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT);
+       if (val < 0)
+               return val;
+       data->temp8[LOCAL_CRIT] = val;
+
+       val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+       if (val < 0)
+               return val;
+       data->temp8[REMOTE_CRIT] = val;
+
+       val = lm90_read_reg(client, LM90_REG_R_TCRIT_HYST);
+       if (val < 0)
+               return val;
+       data->temp_hyst = val;
+
+       val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
+       if (val < 0)
+               return val;
+       data->temp11[REMOTE_LOW] = val << 8;
+
+       if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
+               val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL);
+               if (val < 0)
+                       return val;
+               data->temp11[REMOTE_LOW] |= val;
+       }
+
+       val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH);
+       if (val < 0)
+               return val;
+       data->temp11[REMOTE_HIGH] = val << 8;
+
+       if (data->flags & LM90_HAVE_REM_LIMIT_EXT) {
+               val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL);
+               if (val < 0)
+                       return val;
+               data->temp11[REMOTE_HIGH] |= val;
+       }
+
+       if (data->flags & LM90_HAVE_OFFSET) {
+               val = lm90_read16(client, LM90_REG_R_REMOTE_OFFSH,
+                                 LM90_REG_R_REMOTE_OFFSL);
+               if (val < 0)
+                       return val;
+               data->temp11[REMOTE_OFFSET] = val;
+       }
+
+       if (data->flags & LM90_HAVE_EMERGENCY) {
+               val = lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG);
+               if (val < 0)
+                       return val;
+               data->temp8[LOCAL_EMERG] = val;
+
+               val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG);
+               if (val < 0)
+                       return val;
+               data->temp8[REMOTE_EMERG] = val;
+       }
+
+       if (data->kind == max6696) {
+               val = lm90_select_remote_channel(client, data, 1);
+               if (val < 0)
+                       return val;
+
+               val = lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT);
+               if (val < 0)
+                       return val;
+               data->temp8[REMOTE2_CRIT] = val;
+
+               val = lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG);
+               if (val < 0)
+                       return val;
+               data->temp8[REMOTE2_EMERG] = val;
+
+               val = lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH);
+               if (val < 0)
+                       return val;
+               data->temp11[REMOTE2_LOW] = val << 8;
+
+               val = lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH);
+               if (val < 0)
+                       return val;
+               data->temp11[REMOTE2_HIGH] = val << 8;
+
+               lm90_select_remote_channel(client, data, 0);
+       }
+
+       return 0;
+}
+
 static struct lm90_data *lm90_update_device(struct device *dev)
 {
        struct lm90_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
        unsigned long next_update;
+       int val = 0;
 
        mutex_lock(&data->update_lock);
 
+       if (!data->valid) {
+               val = lm90_update_limits(dev);
+               if (val < 0)
+                       goto error;
+       }
+
        next_update = data->last_updated +
                      msecs_to_jiffies(data->update_interval);
        if (time_after(jiffies, next_update) || !data->valid) {
-               u8 h, l;
-               u8 alarms;
-
                dev_dbg(&client->dev, "Updating lm90 data.\n");
-               lm90_read_reg(client, LM90_REG_R_LOCAL_LOW,
-                             &data->temp8[LOCAL_LOW]);
-               lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH,
-                             &data->temp8[LOCAL_HIGH]);
-               lm90_read_reg(client, LM90_REG_R_LOCAL_CRIT,
-                             &data->temp8[LOCAL_CRIT]);
-               lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
-                             &data->temp8[REMOTE_CRIT]);
-               lm90_read_reg(client, LM90_REG_R_TCRIT_HYST, &data->temp_hyst);
+
+               data->valid = false;
+
+               val = lm90_read_reg(client, LM90_REG_R_LOCAL_LOW);
+               if (val < 0)
+                       goto error;
+               data->temp8[LOCAL_LOW] = val;
+
+               val = lm90_read_reg(client, LM90_REG_R_LOCAL_HIGH);
+               if (val < 0)
+                       goto error;
+               data->temp8[LOCAL_HIGH] = val;
 
                if (data->reg_local_ext) {
-                       lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
-                                   data->reg_local_ext,
-                                   &data->temp11[LOCAL_TEMP]);
+                       val = lm90_read16(client, LM90_REG_R_LOCAL_TEMP,
+                                         data->reg_local_ext);
+                       if (val < 0)
+                               goto error;
+                       data->temp11[LOCAL_TEMP] = val;
                } else {
-                       if (lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP,
-                                         &h) == 0)
-                               data->temp11[LOCAL_TEMP] = h << 8;
-               }
-               lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
-                           LM90_REG_R_REMOTE_TEMPL,
-                           &data->temp11[REMOTE_TEMP]);
-
-               if (lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h) == 0) {
-                       data->temp11[REMOTE_LOW] = h << 8;
-                       if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
-                        && lm90_read_reg(client, LM90_REG_R_REMOTE_LOWL,
-                                         &l) == 0)
-                               data->temp11[REMOTE_LOW] |= l;
-               }
-               if (lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h) == 0) {
-                       data->temp11[REMOTE_HIGH] = h << 8;
-                       if ((data->flags & LM90_HAVE_REM_LIMIT_EXT)
-                        && lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHL,
-                                         &l) == 0)
-                               data->temp11[REMOTE_HIGH] |= l;
+                       val = lm90_read_reg(client, LM90_REG_R_LOCAL_TEMP);
+                       if (val < 0)
+                               goto error;
+                       data->temp11[LOCAL_TEMP] = val << 8;
                }
+               val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
+                                 LM90_REG_R_REMOTE_TEMPL);
+               if (val < 0)
+                       goto error;
+               data->temp11[REMOTE_TEMP] = val;
 
-               if (data->flags & LM90_HAVE_OFFSET) {
-                       if (lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSH,
-                                         &h) == 0
-                        && lm90_read_reg(client, LM90_REG_R_REMOTE_OFFSL,
-                                         &l) == 0)
-                               data->temp11[REMOTE_OFFSET] = (h << 8) | l;
-               }
-               if (data->flags & LM90_HAVE_EMERGENCY) {
-                       lm90_read_reg(client, MAX6659_REG_R_LOCAL_EMERG,
-                                     &data->temp8[LOCAL_EMERG]);
-                       lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
-                                     &data->temp8[REMOTE_EMERG]);
-               }
-               lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
-               data->alarms = alarms;  /* save as 16 bit value */
+               val = lm90_read_reg(client, LM90_REG_R_STATUS);
+               if (val < 0)
+                       goto error;
+               data->alarms = val;     /* lower 8 bits of alarms */
 
                if (data->kind == max6696) {
-                       lm90_select_remote_channel(client, data, 1);
-                       lm90_read_reg(client, LM90_REG_R_REMOTE_CRIT,
-                                     &data->temp8[REMOTE2_CRIT]);
-                       lm90_read_reg(client, MAX6659_REG_R_REMOTE_EMERG,
-                                     &data->temp8[REMOTE2_EMERG]);
-                       lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
-                                   LM90_REG_R_REMOTE_TEMPL,
-                                   &data->temp11[REMOTE2_TEMP]);
-                       if (!lm90_read_reg(client, LM90_REG_R_REMOTE_LOWH, &h))
-                               data->temp11[REMOTE2_LOW] = h << 8;
-                       if (!lm90_read_reg(client, LM90_REG_R_REMOTE_HIGHH, &h))
-                               data->temp11[REMOTE2_HIGH] = h << 8;
+                       val = lm90_select_remote_channel(client, data, 1);
+                       if (val < 0)
+                               goto error;
+
+                       val = lm90_read16(client, LM90_REG_R_REMOTE_TEMPH,
+                                         LM90_REG_R_REMOTE_TEMPL);
+                       if (val < 0)
+                               goto error;
+                       data->temp11[REMOTE2_TEMP] = val;
+
                        lm90_select_remote_channel(client, data, 0);
 
-                       if (!lm90_read_reg(client, MAX6696_REG_R_STATUS2,
-                                          &alarms))
-                               data->alarms |= alarms << 8;
+                       val = lm90_read_reg(client, MAX6696_REG_R_STATUS2);
+                       if (val < 0)
+                               goto error;
+                       data->alarms |= val << 8;
                }
 
                /*
                 * Re-enable ALERT# output if it was originally enabled and
                 * relevant alarms are all clear
                 */
-               if ((data->config_orig & 0x80) == 0
-                && (data->alarms & data->alert_alarms) == 0) {
-                       u8 config;
+               if (!(data->config_orig & 0x80) &&
+                   !(data->alarms & data->alert_alarms)) {
+                       val = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+                       if (val < 0)
+                               goto error;
 
-                       lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
-                       if (config & 0x80) {
+                       if (val & 0x80) {
                                dev_dbg(&client->dev, "Re-enabling ALERT#\n");
                                i2c_smbus_write_byte_data(client,
                                                          LM90_REG_W_CONFIG1,
-                                                         config & ~0x80);
+                                                         val & ~0x80);
                        }
                }
 
                data->last_updated = jiffies;
-               data->valid = 1;
+               data->valid = true;
        }
 
+error:
        mutex_unlock(&data->update_lock);
 
+       if (val < 0)
+               return ERR_PTR(val);
+
        return data;
 }
 
@@ -709,16 +790,14 @@ static inline int temp_from_u8_adt7461(struct lm90_data *data, u8 val)
 {
        if (data->flags & LM90_FLAG_ADT7461_EXT)
                return (val - 64) * 1000;
-       else
-               return temp_from_s8(val);
+       return temp_from_s8(val);
 }
 
 static inline int temp_from_u16_adt7461(struct lm90_data *data, u16 val)
 {
        if (data->flags & LM90_FLAG_ADT7461_EXT)
                return (val - 0x4000) / 64 * 250;
-       else
-               return temp_from_s16(val);
+       return temp_from_s16(val);
 }
 
 static u8 temp_to_u8_adt7461(struct lm90_data *data, long val)
@@ -729,13 +808,12 @@ static u8 temp_to_u8_adt7461(struct lm90_data *data, long val)
                if (val >= 191000)
                        return 0xFF;
                return (val + 500 + 64000) / 1000;
-       } else {
-               if (val <= 0)
-                       return 0;
-               if (val >= 127000)
-                       return 127;
-               return (val + 500) / 1000;
        }
+       if (val <= 0)
+               return 0;
+       if (val >= 127000)
+               return 127;
+       return (val + 500) / 1000;
 }
 
 static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
@@ -746,13 +824,12 @@ static u16 temp_to_u16_adt7461(struct lm90_data *data, long val)
                if (val >= 191750)
                        return 0xFFC0;
                return (val + 64000 + 125) / 250 * 64;
-       } else {
-               if (val <= 0)
-                       return 0;
-               if (val >= 127750)
-                       return 0x7FC0;
-               return (val + 125) / 250 * 64;
        }
+       if (val <= 0)
+               return 0;
+       if (val >= 127750)
+               return 0x7FC0;
+       return (val + 125) / 250 * 64;
 }
 
 /*
@@ -766,6 +843,9 @@ static ssize_t show_temp8(struct device *dev, struct device_attribute *devattr,
        struct lm90_data *data = lm90_update_device(dev);
        int temp;
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        if (data->kind == adt7461 || data->kind == tmp451)
                temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
        else if (data->kind == max6646)
@@ -832,6 +912,9 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
        struct lm90_data *data = lm90_update_device(dev);
        int temp;
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        if (data->kind == adt7461 || data->kind == tmp451)
                temp = temp_from_u16_adt7461(data, data->temp11[attr->index]);
        else if (data->kind == max6646)
@@ -907,6 +990,9 @@ static ssize_t show_temphyst(struct device *dev,
        struct lm90_data *data = lm90_update_device(dev);
        int temp;
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        if (data->kind == adt7461 || data->kind == tmp451)
                temp = temp_from_u8_adt7461(data, data->temp8[attr->index]);
        else if (data->kind == max6646)
@@ -953,6 +1039,10 @@ static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
                           char *buf)
 {
        struct lm90_data *data = lm90_update_device(dev);
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        return sprintf(buf, "%d\n", data->alarms);
 }
 
@@ -963,6 +1053,9 @@ static ssize_t show_alarm(struct device *dev, struct device_attribute
        struct lm90_data *data = lm90_update_device(dev);
        int bitnr = attr->index;
 
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
        return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
 }
 
@@ -1404,8 +1497,11 @@ static int lm90_detect(struct i2c_client *client,
        return 0;
 }
 
-static void lm90_restore_conf(struct i2c_client *client, struct lm90_data *data)
+static void lm90_restore_conf(void *_data)
 {
+       struct lm90_data *data = _data;
+       struct i2c_client *client = data->client;
+
        /* Restore initial configuration */
        i2c_smbus_write_byte_data(client, LM90_REG_W_CONVRATE,
                                  data->convrate_orig);
@@ -1413,24 +1509,22 @@ static void lm90_restore_conf(struct i2c_client *client, struct lm90_data *data)
                                  data->config_orig);
 }
 
-static void lm90_init_client(struct i2c_client *client, struct lm90_data *data)
+static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
 {
-       u8 config, convrate;
+       int config, convrate;
 
-       if (lm90_read_reg(client, LM90_REG_R_CONVRATE, &convrate) < 0) {
-               dev_warn(&client->dev, "Failed to read convrate register!\n");
-               convrate = LM90_DEF_CONVRATE_RVAL;
-       }
+       convrate = lm90_read_reg(client, LM90_REG_R_CONVRATE);
+       if (convrate < 0)
+               return convrate;
        data->convrate_orig = convrate;
 
        /*
         * Start the conversions.
         */
        lm90_set_convrate(client, data, 500);   /* 500ms; 2Hz conversion rate */
-       if (lm90_read_reg(client, LM90_REG_R_CONFIG1, &config) < 0) {
-               dev_warn(&client->dev, "Initialization failed!\n");
-               return;
-       }
+       config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+       if (config < 0)
+               return config;
        data->config_orig = config;
 
        /* Check Temperature Range Select */
@@ -1456,17 +1550,26 @@ static void lm90_init_client(struct i2c_client *client, struct lm90_data *data)
        config &= 0xBF; /* run */
        if (config != data->config_orig) /* Only write if changed */
                i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
+
+       devm_add_action(&client->dev, lm90_restore_conf, data);
+
+       return 0;
 }
 
 static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
 {
        struct lm90_data *data = i2c_get_clientdata(client);
-       u8 st, st2 = 0;
+       int st, st2 = 0;
 
-       lm90_read_reg(client, LM90_REG_R_STATUS, &st);
+       st = lm90_read_reg(client, LM90_REG_R_STATUS);
+       if (st < 0)
+               return false;
 
-       if (data->kind == max6696)
-               lm90_read_reg(client, MAX6696_REG_R_STATUS2, &st2);
+       if (data->kind == max6696) {
+               st2 = lm90_read_reg(client, MAX6696_REG_R_STATUS2);
+               if (st2 < 0)
+                       return false;
+       }
 
        *status = st | (st2 << 8);
 
@@ -1506,6 +1609,16 @@ static irqreturn_t lm90_irq_thread(int irq, void *dev_id)
                return IRQ_NONE;
 }
 
+static void lm90_remove_pec(void *dev)
+{
+       device_remove_file(dev, &dev_attr_pec);
+}
+
+static void lm90_regulator_disable(void *regulator)
+{
+       regulator_disable(regulator);
+}
+
 static int lm90_probe(struct i2c_client *client,
                      const struct i2c_device_id *id)
 {
@@ -1513,6 +1626,7 @@ static int lm90_probe(struct i2c_client *client,
        struct i2c_adapter *adapter = to_i2c_adapter(dev->parent);
        struct lm90_data *data;
        struct regulator *regulator;
+       struct device *hwmon_dev;
        int groups = 0;
        int err;
 
@@ -1526,6 +1640,8 @@ static int lm90_probe(struct i2c_client *client,
                return err;
        }
 
+       devm_add_action(dev, lm90_regulator_disable, regulator);
+
        data = devm_kzalloc(dev, sizeof(struct lm90_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -1534,8 +1650,6 @@ static int lm90_probe(struct i2c_client *client,
        i2c_set_clientdata(client, data);
        mutex_init(&data->update_lock);
 
-       data->regulator = regulator;
-
        /* Set the device type */
        data->kind = id->driver_data;
        if (data->kind == adm1032) {
@@ -1557,7 +1671,11 @@ static int lm90_probe(struct i2c_client *client,
        data->max_convrate = lm90_params[data->kind].max_convrate;
 
        /* Initialize the LM90 chip */
-       lm90_init_client(client, data);
+       err = lm90_init_client(client, data);
+       if (err < 0) {
+               dev_err(dev, "Failed to initialize device\n");
+               return err;
+       }
 
        /* Register sysfs hooks */
        data->groups[groups++] = &lm90_group;
@@ -1577,15 +1695,14 @@ static int lm90_probe(struct i2c_client *client,
        if (client->flags & I2C_CLIENT_PEC) {
                err = device_create_file(dev, &dev_attr_pec);
                if (err)
-                       goto exit_restore;
+                       return err;
+               devm_add_action(dev, lm90_remove_pec, dev);
        }
 
-       data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
-                                                           data, data->groups);
-       if (IS_ERR(data->hwmon_dev)) {
-               err = PTR_ERR(data->hwmon_dev);
-               goto exit_remove_pec;
-       }
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          data, data->groups);
+       if (IS_ERR(hwmon_dev))
+               return PTR_ERR(hwmon_dev);
 
        if (client->irq) {
                dev_dbg(dev, "IRQ: %d\n", client->irq);
@@ -1595,32 +1712,10 @@ static int lm90_probe(struct i2c_client *client,
                                                "lm90", client);
                if (err < 0) {
                        dev_err(dev, "cannot request IRQ %d\n", client->irq);
-                       goto exit_unregister;
+                       return err;
                }
        }
 
-       return 0;
-
-exit_unregister:
-       hwmon_device_unregister(data->hwmon_dev);
-exit_remove_pec:
-       device_remove_file(dev, &dev_attr_pec);
-exit_restore:
-       lm90_restore_conf(client, data);
-       regulator_disable(data->regulator);
-
-       return err;
-}
-
-static int lm90_remove(struct i2c_client *client)
-{
-       struct lm90_data *data = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(data->hwmon_dev);
-       device_remove_file(&client->dev, &dev_attr_pec);
-       lm90_restore_conf(client, data);
-       regulator_disable(data->regulator);
-
        return 0;
 }
 
@@ -1636,13 +1731,16 @@ static void lm90_alert(struct i2c_client *client, unsigned int flag)
                 */
                struct lm90_data *data = i2c_get_clientdata(client);
 
-               if ((data->flags & LM90_HAVE_BROKEN_ALERT)
-                && (alarms & data->alert_alarms)) {
-                       u8 config;
+               if ((data->flags & LM90_HAVE_BROKEN_ALERT) &&
+                   (alarms & data->alert_alarms)) {
+                       int config;
+
                        dev_dbg(&client->dev, "Disabling ALERT#\n");
-                       lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
-                       i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
-                                                 config | 0x80);
+                       config = lm90_read_reg(client, LM90_REG_R_CONFIG1);
+                       if (config >= 0)
+                               i2c_smbus_write_byte_data(client,
+                                                         LM90_REG_W_CONFIG1,
+                                                         config | 0x80);
                }
        } else {
                dev_info(&client->dev, "Everything OK\n");
@@ -1655,7 +1753,6 @@ static struct i2c_driver lm90_driver = {
                .name   = "lm90",
        },
        .probe          = lm90_probe,
-       .remove         = lm90_remove,
        .alert          = lm90_alert,
        .id_table       = lm90_id,
        .detect         = lm90_detect,
diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c
new file mode 100644 (file)
index 0000000..b73a488
--- /dev/null
@@ -0,0 +1,775 @@
+/* Sensirion SHT3x-DIS humidity and temperature sensor driver.
+ * The SHT3x comes in many different versions, this driver is for the
+ * I2C version only.
+ *
+ * Copyright (C) 2016 Sensirion AG, Switzerland
+ * Author: David Frey <david.frey@sensirion.com>
+ * Author: Pascal Sachs <pascal.sachs@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/platform_data/sht3x.h>
+
+/* commands (high precision mode) */
+static const unsigned char sht3x_cmd_measure_blocking_hpm[]    = { 0x2c, 0x06 };
+static const unsigned char sht3x_cmd_measure_nonblocking_hpm[] = { 0x24, 0x00 };
+
+/* commands (low power mode) */
+static const unsigned char sht3x_cmd_measure_blocking_lpm[]    = { 0x2c, 0x10 };
+static const unsigned char sht3x_cmd_measure_nonblocking_lpm[] = { 0x24, 0x16 };
+
+/* commands for periodic mode */
+static const unsigned char sht3x_cmd_measure_periodic_mode[]   = { 0xe0, 0x00 };
+static const unsigned char sht3x_cmd_break[]                   = { 0x30, 0x93 };
+
+/* commands for heater control */
+static const unsigned char sht3x_cmd_heater_on[]               = { 0x30, 0x6d };
+static const unsigned char sht3x_cmd_heater_off[]              = { 0x30, 0x66 };
+
+/* other commands */
+static const unsigned char sht3x_cmd_read_status_reg[]         = { 0xf3, 0x2d };
+static const unsigned char sht3x_cmd_clear_status_reg[]        = { 0x30, 0x41 };
+
+/* delays for non-blocking i2c commands, both in us */
+#define SHT3X_NONBLOCKING_WAIT_TIME_HPM  15000
+#define SHT3X_NONBLOCKING_WAIT_TIME_LPM   4000
+
+#define SHT3X_WORD_LEN         2
+#define SHT3X_CMD_LENGTH       2
+#define SHT3X_CRC8_LEN         1
+#define SHT3X_RESPONSE_LENGTH  6
+#define SHT3X_CRC8_POLYNOMIAL  0x31
+#define SHT3X_CRC8_INIT        0xFF
+#define SHT3X_MIN_TEMPERATURE  -45000
+#define SHT3X_MAX_TEMPERATURE  130000
+#define SHT3X_MIN_HUMIDITY     0
+#define SHT3X_MAX_HUMIDITY     100000
+
+enum sht3x_chips {
+       sht3x,
+       sts3x,
+};
+
+enum sht3x_limits {
+       limit_max = 0,
+       limit_max_hyst,
+       limit_min,
+       limit_min_hyst,
+};
+
+DECLARE_CRC8_TABLE(sht3x_crc8_table);
+
+/* periodic measure commands (high precision mode) */
+static const char periodic_measure_commands_hpm[][SHT3X_CMD_LENGTH] = {
+       /* 0.5 measurements per second */
+       {0x20, 0x32},
+       /* 1 measurement per second */
+       {0x21, 0x30},
+       /* 2 measurements per second */
+       {0x22, 0x36},
+       /* 4 measurements per second */
+       {0x23, 0x34},
+       /* 10 measurements per second */
+       {0x27, 0x37},
+};
+
+/* periodic measure commands (low power mode) */
+static const char periodic_measure_commands_lpm[][SHT3X_CMD_LENGTH] = {
+       /* 0.5 measurements per second */
+       {0x20, 0x2f},
+       /* 1 measurement per second */
+       {0x21, 0x2d},
+       /* 2 measurements per second */
+       {0x22, 0x2b},
+       /* 4 measurements per second */
+       {0x23, 0x29},
+       /* 10 measurements per second */
+       {0x27, 0x2a},
+};
+
+struct sht3x_limit_commands {
+       const char read_command[SHT3X_CMD_LENGTH];
+       const char write_command[SHT3X_CMD_LENGTH];
+};
+
+static const struct sht3x_limit_commands limit_commands[] = {
+       /* temp1_max, humidity1_max */
+       [limit_max] = { {0xe1, 0x1f}, {0x61, 0x1d} },
+       /* temp1_max_hyst, humidity1_max_hyst */
+       [limit_max_hyst] = { {0xe1, 0x14}, {0x61, 0x16} },
+       /* temp1_min, humidity1_min */
+       [limit_min] = { {0xe1, 0x02}, {0x61, 0x00} },
+       /* temp1_min_hyst, humidity1_min_hyst */
+       [limit_min_hyst] = { {0xe1, 0x09}, {0x61, 0x0B} },
+};
+
+#define SHT3X_NUM_LIMIT_CMD  ARRAY_SIZE(limit_commands)
+
+static const u16 mode_to_update_interval[] = {
+          0,
+       2000,
+       1000,
+        500,
+        250,
+        100,
+};
+
+struct sht3x_data {
+       struct i2c_client *client;
+       struct mutex i2c_lock; /* lock for sending i2c commands */
+       struct mutex data_lock; /* lock for updating driver data */
+
+       u8 mode;
+       const unsigned char *command;
+       u32 wait_time;                  /* in us */
+       unsigned long last_update;      /* last update in periodic mode */
+
+       struct sht3x_platform_data setup;
+
+       /*
+        * cached values for temperature and humidity and limits
+        * the limits arrays have the following order:
+        * max, max_hyst, min, min_hyst
+        */
+       int temperature;
+       int temperature_limits[SHT3X_NUM_LIMIT_CMD];
+       u32 humidity;
+       u32 humidity_limits[SHT3X_NUM_LIMIT_CMD];
+};
+
+static u8 get_mode_from_update_interval(u16 value)
+{
+       size_t index;
+       u8 number_of_modes = ARRAY_SIZE(mode_to_update_interval);
+
+       if (value == 0)
+               return 0;
+
+       /* find next faster update interval */
+       for (index = 1; index < number_of_modes; index++) {
+               if (mode_to_update_interval[index] <= value)
+                       return index;
+       }
+
+       return number_of_modes - 1;
+}
+
+static int sht3x_read_from_command(struct i2c_client *client,
+                                  struct sht3x_data *data,
+                                  const char *command,
+                                  char *buf, int length, u32 wait_time)
+{
+       int ret;
+
+       mutex_lock(&data->i2c_lock);
+       ret = i2c_master_send(client, command, SHT3X_CMD_LENGTH);
+
+       if (ret != SHT3X_CMD_LENGTH) {
+               ret = ret < 0 ? ret : -EIO;
+               goto out;
+       }
+
+       if (wait_time)
+               usleep_range(wait_time, wait_time + 1000);
+
+       ret = i2c_master_recv(client, buf, length);
+       if (ret != length) {
+               ret = ret < 0 ? ret : -EIO;
+               goto out;
+       }
+
+       ret = 0;
+out:
+       mutex_unlock(&data->i2c_lock);
+       return ret;
+}
+
+static int sht3x_extract_temperature(u16 raw)
+{
+       /*
+        * From datasheet:
+        * T = -45 + 175 * ST / 2^16
+        * Adapted for integer fixed point (3 digit) arithmetic.
+        */
+       return ((21875 * (int)raw) >> 13) - 45000;
+}
+
+static u32 sht3x_extract_humidity(u16 raw)
+{
+       /*
+        * From datasheet:
+        * RH = 100 * SRH / 2^16
+        * Adapted for integer fixed point (3 digit) arithmetic.
+        */
+       return (12500 * (u32)raw) >> 13;
+}
+
+static struct sht3x_data *sht3x_update_client(struct device *dev)
+{
+       struct sht3x_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       u16 interval_ms = mode_to_update_interval[data->mode];
+       unsigned long interval_jiffies = msecs_to_jiffies(interval_ms);
+       unsigned char buf[SHT3X_RESPONSE_LENGTH];
+       u16 val;
+       int ret = 0;
+
+       mutex_lock(&data->data_lock);
+       /*
+        * Only update cached readings once per update interval in periodic
+        * mode. In single shot mode the sensor measures values on demand, so
+        * every time the sysfs interface is called, a measurement is triggered.
+        * In periodic mode however, the measurement process is handled
+        * internally by the sensor and reading out sensor values only makes
+        * sense if a new reading is available.
+        */
+       if (time_after(jiffies, data->last_update + interval_jiffies)) {
+               ret = sht3x_read_from_command(client, data, data->command, buf,
+                                             sizeof(buf), data->wait_time);
+               if (ret)
+                       goto out;
+
+               val = be16_to_cpup((__be16 *)buf);
+               data->temperature = sht3x_extract_temperature(val);
+               val = be16_to_cpup((__be16 *)(buf + 3));
+               data->humidity = sht3x_extract_humidity(val);
+               data->last_update = jiffies;
+       }
+
+out:
+       mutex_unlock(&data->data_lock);
+       if (ret)
+               return ERR_PTR(ret);
+
+       return data;
+}
+
+/* sysfs attributes */
+static ssize_t temp1_input_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct sht3x_data *data = sht3x_update_client(dev);
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       return sprintf(buf, "%d\n", data->temperature);
+}
+
+static ssize_t humidity1_input_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct sht3x_data *data = sht3x_update_client(dev);
+
+       if (IS_ERR(data))
+               return PTR_ERR(data);
+
+       return sprintf(buf, "%u\n", data->humidity);
+}
+
+/*
+ * limits_update must only be called from probe or with data_lock held
+ */
+static int limits_update(struct sht3x_data *data)
+{
+       int ret;
+       u8 index;
+       int temperature;
+       u32 humidity;
+       u16 raw;
+       char buffer[SHT3X_RESPONSE_LENGTH];
+       const struct sht3x_limit_commands *commands;
+       struct i2c_client *client = data->client;
+
+       for (index = 0; index < SHT3X_NUM_LIMIT_CMD; index++) {
+               commands = &limit_commands[index];
+               ret = sht3x_read_from_command(client, data,
+                                             commands->read_command, buffer,
+                                             SHT3X_RESPONSE_LENGTH, 0);
+
+               if (ret)
+                       return ret;
+
+               raw = be16_to_cpup((__be16 *)buffer);
+               temperature = sht3x_extract_temperature((raw & 0x01ff) << 7);
+               humidity = sht3x_extract_humidity(raw & 0xfe00);
+               data->temperature_limits[index] = temperature;
+               data->humidity_limits[index] = humidity;
+       }
+
+       return ret;
+}
+
+static ssize_t temp1_limit_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       struct sht3x_data *data = dev_get_drvdata(dev);
+       u8 index = to_sensor_dev_attr(attr)->index;
+       int temperature_limit = data->temperature_limits[index];
+
+       return scnprintf(buf, PAGE_SIZE, "%d\n", temperature_limit);
+}
+
+static ssize_t humidity1_limit_show(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       struct sht3x_data *data = dev_get_drvdata(dev);
+       u8 index = to_sensor_dev_attr(attr)->index;
+       u32 humidity_limit = data->humidity_limits[index];
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", humidity_limit);
+}
+
+/*
+ * limit_store must only be called with data_lock held
+ */
+static size_t limit_store(struct device *dev,
+                         size_t count,
+                         u8 index,
+                         int temperature,
+                         u32 humidity)
+{
+       char buffer[SHT3X_CMD_LENGTH + SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+       char *position = buffer;
+       int ret;
+       u16 raw;
+       struct sht3x_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       const struct sht3x_limit_commands *commands;
+
+       commands = &limit_commands[index];
+
+       memcpy(position, commands->write_command, SHT3X_CMD_LENGTH);
+       position += SHT3X_CMD_LENGTH;
+       /*
+        * ST = (T + 45) / 175 * 2^16
+        * SRH = RH / 100 * 2^16
+        * adapted for fixed point arithmetic and packed the same as
+        * in limit_show()
+        */
+       raw = ((u32)(temperature + 45000) * 24543) >> (16 + 7);
+       raw |= ((humidity * 42950) >> 16) & 0xfe00;
+
+       *((__be16 *)position) = cpu_to_be16(raw);
+       position += SHT3X_WORD_LEN;
+       *position = crc8(sht3x_crc8_table,
+                        position - SHT3X_WORD_LEN,
+                        SHT3X_WORD_LEN,
+                        SHT3X_CRC8_INIT);
+
+       mutex_lock(&data->i2c_lock);
+       ret = i2c_master_send(client, buffer, sizeof(buffer));
+       mutex_unlock(&data->i2c_lock);
+
+       if (ret != sizeof(buffer))
+               return ret < 0 ? ret : -EIO;
+
+       data->temperature_limits[index] = temperature;
+       data->humidity_limits[index] = humidity;
+       return count;
+}
+
+/*
+ * temp1_limit_store() - sysfs write handler shared by the temperature
+ * limit attributes (min/max and their hysteresis); the attribute index
+ * selects which limit is written. Parses the value (apparently in
+ * milli-degrees Celsius, matching the +45000 offset used in
+ * limit_store() -- TODO confirm against limit_show()), clamps it to
+ * the supported range and writes it to the chip together with the
+ * currently cached humidity limit for the same index.
+ */
+static ssize_t temp1_limit_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf,
+                                size_t count)
+{
+       int temperature;
+       int ret;
+       struct sht3x_data *data = dev_get_drvdata(dev);
+       u8 index = to_sensor_dev_attr(attr)->index;
+
+       ret = kstrtoint(buf, 0, &temperature);
+       if (ret)
+               return ret;
+
+       /* out-of-range input is silently clamped, not rejected */
+       temperature = clamp_val(temperature, SHT3X_MIN_TEMPERATURE,
+                               SHT3X_MAX_TEMPERATURE);
+       /* data_lock serializes updates of the cached limit pair */
+       mutex_lock(&data->data_lock);
+       ret = limit_store(dev, count, index, temperature,
+                         data->humidity_limits[index]);
+       mutex_unlock(&data->data_lock);
+
+       return ret;
+}
+
+/*
+ * humidity1_limit_store() - sysfs write handler shared by the humidity
+ * limit attributes (min/max and their hysteresis); the attribute index
+ * selects which limit is written. Clamps the parsed value to the
+ * supported range and writes it to the chip together with the
+ * currently cached temperature limit for the same index.
+ */
+static ssize_t humidity1_limit_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf,
+                                    size_t count)
+{
+       u32 humidity;
+       int ret;
+       struct sht3x_data *data = dev_get_drvdata(dev);
+       u8 index = to_sensor_dev_attr(attr)->index;
+
+       ret = kstrtou32(buf, 0, &humidity);
+       if (ret)
+               return ret;
+
+       /* out-of-range input is silently clamped, not rejected */
+       humidity = clamp_val(humidity, SHT3X_MIN_HUMIDITY, SHT3X_MAX_HUMIDITY);
+       /* data_lock serializes updates of the cached limit pair */
+       mutex_lock(&data->data_lock);
+       ret = limit_store(dev, count, index, data->temperature_limits[index],
+                         humidity);
+       mutex_unlock(&data->data_lock);
+
+       return ret;
+}
+
+/*
+ * sht3x_select_command() - choose the measurement command and the delay
+ * required before the result can be fetched, based on the current
+ * operating mode (periodic vs. single shot) and the blocking_io /
+ * high_precision setup flags. Caller is expected to hold the locks
+ * protecting data->mode (NOTE(review): no locking is taken here --
+ * confirm callers serialize access).
+ */
+static void sht3x_select_command(struct sht3x_data *data)
+{
+       /*
+        * In blocking mode (clock stretching mode) the I2C bus
+        * is blocked for other traffic, thus the call to i2c_master_recv()
+        * will wait until the data is ready. For non blocking mode, we
+        * have to wait ourselves.
+        */
+       if (data->mode > 0) {
+               /* periodic mode: results are already being produced */
+               data->command = sht3x_cmd_measure_periodic_mode;
+               data->wait_time = 0;
+       } else if (data->setup.blocking_io) {
+               data->command = data->setup.high_precision ?
+                               sht3x_cmd_measure_blocking_hpm :
+                               sht3x_cmd_measure_blocking_lpm;
+               data->wait_time = 0;
+       } else {
+               if (data->setup.high_precision) {
+                       data->command = sht3x_cmd_measure_nonblocking_hpm;
+                       data->wait_time = SHT3X_NONBLOCKING_WAIT_TIME_HPM;
+               } else {
+                       data->command = sht3x_cmd_measure_nonblocking_lpm;
+                       data->wait_time = SHT3X_NONBLOCKING_WAIT_TIME_LPM;
+               }
+       }
+}
+
+/*
+ * status_register_read() - fetch 'length' bytes of the chip's status
+ * register into 'buffer' via sht3x_read_from_command() with no extra
+ * wait time. The 'attr' argument is unused; it only mirrors the sysfs
+ * show-handler signature of the callers. Returns 0 on success or a
+ * negative errno.
+ */
+static int status_register_read(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buffer, int length)
+{
+       int ret;
+       struct sht3x_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+
+       ret = sht3x_read_from_command(client, data, sht3x_cmd_read_status_reg,
+                                     buffer, length, 0);
+
+       return ret;
+}
+
+/*
+ * temp1_alarm_show() - report the temperature alert flag as 0/1.
+ * Reads the status register (2 data bytes + CRC) and tests bit 2 of
+ * the high byte -- presumably the T tracking alert bit; verify against
+ * the SHT3x datasheet status register layout.
+ */
+static ssize_t temp1_alarm_show(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+       int ret;
+
+       ret = status_register_read(dev, attr, buffer,
+                                  SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
+       if (ret)
+               return ret;
+
+       /* buffer[0] holds the status register MSB */
+       return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x04));
+}
+
+/*
+ * humidity1_alarm_show() - report the humidity alert flag as 0/1.
+ * Reads the status register (2 data bytes + CRC) and tests bit 3 of
+ * the high byte -- presumably the RH tracking alert bit; verify
+ * against the SHT3x datasheet status register layout.
+ */
+static ssize_t humidity1_alarm_show(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+       int ret;
+
+       ret = status_register_read(dev, attr, buffer,
+                                  SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
+       if (ret)
+               return ret;
+
+       /* buffer[0] holds the status register MSB */
+       return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x08));
+}
+
+/*
+ * heater_enable_show() - report whether the internal heater is on
+ * (0/1). Reads the status register and tests bit 5 of the high byte
+ * -- presumably the heater status bit; verify against the SHT3x
+ * datasheet status register layout.
+ */
+static ssize_t heater_enable_show(struct device *dev,
+                                 struct device_attribute *attr,
+                                 char *buf)
+{
+       char buffer[SHT3X_WORD_LEN + SHT3X_CRC8_LEN];
+       int ret;
+
+       ret = status_register_read(dev, attr, buffer,
+                                  SHT3X_WORD_LEN + SHT3X_CRC8_LEN);
+       if (ret)
+               return ret;
+
+       /* buffer[0] holds the status register MSB */
+       return scnprintf(buf, PAGE_SIZE, "%d\n", !!(buffer[0] & 0x20));
+}
+
+/*
+ * heater_enable_store() - switch the internal heater on or off.
+ * Accepts any kstrtobool-compatible input and sends the corresponding
+ * heater command under the i2c_lock.
+ *
+ * NOTE(review): on success this returns the raw i2c_master_send()
+ * result (the number of command bytes sent) rather than 'count';
+ * if 'count' differs, the VFS will treat the write as short and
+ * retry -- consider returning 'count' on success.
+ */
+static ssize_t heater_enable_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf,
+                                  size_t count)
+{
+       struct sht3x_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+       int ret;
+       bool status;
+
+       ret = kstrtobool(buf, &status);
+       if (ret)
+               return ret;
+
+       mutex_lock(&data->i2c_lock);
+
+       if (status)
+               ret = i2c_master_send(client, (char *)&sht3x_cmd_heater_on,
+                                     SHT3X_CMD_LENGTH);
+       else
+               ret = i2c_master_send(client, (char *)&sht3x_cmd_heater_off,
+                                     SHT3X_CMD_LENGTH);
+
+       mutex_unlock(&data->i2c_lock);
+
+       return ret;
+}
+
+/*
+ * update_interval_show() - report the measurement update interval
+ * corresponding to the current operating mode, via the
+ * mode_to_update_interval[] lookup table (units defined by that
+ * table; presumably milliseconds -- confirm at its definition).
+ */
+static ssize_t update_interval_show(struct device *dev,
+                                   struct device_attribute *attr,
+                                   char *buf)
+{
+       struct sht3x_data *data = dev_get_drvdata(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n",
+                        mode_to_update_interval[data->mode]);
+}
+
+/*
+ * update_interval_store() - switch between single shot (mode 0) and
+ * one of the periodic measurement modes. The requested interval is
+ * mapped to a mode; if it differs from the current one, periodic mode
+ * is first aborted with a break command, then the new periodic mode
+ * (if any) is selected, and finally the cached command/wait-time pair
+ * is refreshed via sht3x_select_command().
+ *
+ * The final 'ret != SHT3X_CMD_LENGTH' check relies on every path that
+ * reaches 'out' having set ret to an i2c_master_send() result (byte
+ * count on success, negative errno on failure).
+ */
+static ssize_t update_interval_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf,
+                                    size_t count)
+{
+       u16 update_interval;
+       u8 mode;
+       int ret;
+       const char *command;
+       struct sht3x_data *data = dev_get_drvdata(dev);
+       struct i2c_client *client = data->client;
+
+       ret = kstrtou16(buf, 0, &update_interval);
+       if (ret)
+               return ret;
+
+       mode = get_mode_from_update_interval(update_interval);
+
+       mutex_lock(&data->data_lock);
+       /* mode did not change */
+       if (mode == data->mode) {
+               mutex_unlock(&data->data_lock);
+               return count;
+       }
+
+       /* i2c_lock nests inside data_lock; unlock order below matches */
+       mutex_lock(&data->i2c_lock);
+       /*
+        * Abort periodic measure mode.
+        * To do any changes to the configuration while in periodic mode, we
+        * have to send a break command to the sensor, which then falls back
+        * to single shot (mode = 0).
+        */
+       if (data->mode > 0) {
+               ret = i2c_master_send(client, sht3x_cmd_break,
+                                     SHT3X_CMD_LENGTH);
+               if (ret != SHT3X_CMD_LENGTH)
+                       goto out;
+               data->mode = 0;
+       }
+
+       if (mode > 0) {
+               /* periodic command tables are indexed from mode 1 */
+               if (data->setup.high_precision)
+                       command = periodic_measure_commands_hpm[mode - 1];
+               else
+                       command = periodic_measure_commands_lpm[mode - 1];
+
+               /* select mode */
+               ret = i2c_master_send(client, command, SHT3X_CMD_LENGTH);
+               if (ret != SHT3X_CMD_LENGTH)
+                       goto out;
+       }
+
+       /* select mode and command */
+       data->mode = mode;
+       sht3x_select_command(data);
+
+out:
+       mutex_unlock(&data->i2c_lock);
+       mutex_unlock(&data->data_lock);
+       if (ret != SHT3X_CMD_LENGTH)
+               return ret < 0 ? ret : -EIO;
+
+       return count;
+}
+
+/*
+ * sysfs attribute tables. The SHT3x exposes temperature and humidity
+ * readings, alert limits with hysteresis (the limit_* index selects
+ * the register pair), alarm flags, heater control and the measurement
+ * update interval; the temperature-only STS3x exposes just
+ * temp1_input.
+ */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, temp1_input_show, NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, humidity1_input_show,
+                         NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR,
+                         temp1_limit_show, temp1_limit_store,
+                         limit_max);
+static SENSOR_DEVICE_ATTR(humidity1_max, S_IRUGO | S_IWUSR,
+                         humidity1_limit_show, humidity1_limit_store,
+                         limit_max);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR,
+                         temp1_limit_show, temp1_limit_store,
+                         limit_max_hyst);
+static SENSOR_DEVICE_ATTR(humidity1_max_hyst, S_IRUGO | S_IWUSR,
+                         humidity1_limit_show, humidity1_limit_store,
+                         limit_max_hyst);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR,
+                         temp1_limit_show, temp1_limit_store,
+                         limit_min);
+static SENSOR_DEVICE_ATTR(humidity1_min, S_IRUGO | S_IWUSR,
+                         humidity1_limit_show, humidity1_limit_store,
+                         limit_min);
+static SENSOR_DEVICE_ATTR(temp1_min_hyst, S_IRUGO | S_IWUSR,
+                         temp1_limit_show, temp1_limit_store,
+                         limit_min_hyst);
+static SENSOR_DEVICE_ATTR(humidity1_min_hyst, S_IRUGO | S_IWUSR,
+                         humidity1_limit_show, humidity1_limit_store,
+                         limit_min_hyst);
+static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, temp1_alarm_show, NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_alarm, S_IRUGO, humidity1_alarm_show,
+                         NULL, 0);
+static SENSOR_DEVICE_ATTR(heater_enable, S_IRUGO | S_IWUSR,
+                         heater_enable_show, heater_enable_store, 0);
+static SENSOR_DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR,
+                         update_interval_show, update_interval_store, 0);
+
+static struct attribute *sht3x_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       &sensor_dev_attr_humidity1_input.dev_attr.attr,
+       &sensor_dev_attr_temp1_max.dev_attr.attr,
+       &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+       &sensor_dev_attr_humidity1_max.dev_attr.attr,
+       &sensor_dev_attr_humidity1_max_hyst.dev_attr.attr,
+       &sensor_dev_attr_temp1_min.dev_attr.attr,
+       &sensor_dev_attr_temp1_min_hyst.dev_attr.attr,
+       &sensor_dev_attr_humidity1_min.dev_attr.attr,
+       &sensor_dev_attr_humidity1_min_hyst.dev_attr.attr,
+       &sensor_dev_attr_temp1_alarm.dev_attr.attr,
+       &sensor_dev_attr_humidity1_alarm.dev_attr.attr,
+       &sensor_dev_attr_heater_enable.dev_attr.attr,
+       &sensor_dev_attr_update_interval.dev_attr.attr,
+       NULL
+};
+
+static struct attribute *sts3x_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       NULL
+};
+
+ATTRIBUTE_GROUPS(sht3x);
+ATTRIBUTE_GROUPS(sts3x);
+
+/*
+ * sht3x_probe() - bind the driver to an SHT3x/STS3x client.
+ * Checks for plain-I2C adapter support, probes the chip by clearing
+ * its status register, initializes driver defaults (optionally
+ * overridden by platform data), reads back the alert limits and
+ * registers the hwmon device with the chip-specific attribute group.
+ * Returns 0 on success or a negative errno.
+ */
+static int sht3x_probe(struct i2c_client *client,
+                      const struct i2c_device_id *id)
+{
+       int ret;
+       struct sht3x_data *data;
+       struct device *hwmon_dev;
+       struct i2c_adapter *adap = client->adapter;
+       struct device *dev = &client->dev;
+       const struct attribute_group **attribute_groups;
+
+       /*
+        * we require full i2c support since the sht3x uses multi-byte read and
+        * writes as well as multi-byte commands which are not supported by
+        * the smbus protocol
+        */
+       if (!i2c_check_functionality(adap, I2C_FUNC_I2C))
+               return -ENODEV;
+
+       /* also doubles as a presence check: a short write means no chip */
+       ret = i2c_master_send(client, sht3x_cmd_clear_status_reg,
+                             SHT3X_CMD_LENGTH);
+       if (ret != SHT3X_CMD_LENGTH)
+               return ret < 0 ? ret : -ENODEV;
+
+       data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       /* defaults: non-blocking I/O, high precision, single-shot mode */
+       data->setup.blocking_io = false;
+       data->setup.high_precision = true;
+       data->mode = 0;
+       data->last_update = 0;
+       data->client = client;
+       crc8_populate_msb(sht3x_crc8_table, SHT3X_CRC8_POLYNOMIAL);
+
+       /* platform data, when present, overrides the defaults above */
+       if (client->dev.platform_data)
+               data->setup = *(struct sht3x_platform_data *)dev->platform_data;
+
+       sht3x_select_command(data);
+
+       mutex_init(&data->i2c_lock);
+       mutex_init(&data->data_lock);
+
+       ret = limits_update(data);
+       if (ret)
+               return ret;
+
+       /* STS3x is temperature-only; driver_data carries the chip kind */
+       if (id->driver_data == sts3x)
+               attribute_groups = sts3x_groups;
+       else
+               attribute_groups = sht3x_groups;
+
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev,
+                                                          client->name,
+                                                          data,
+                                                          attribute_groups);
+
+       if (IS_ERR(hwmon_dev))
+               dev_dbg(dev, "unable to register hwmon device\n");
+
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+/* device ID table; driver_data selects the attribute set in probe() */
+static const struct i2c_device_id sht3x_ids[] = {
+       {"sht3x", sht3x},
+       {"sts3x", sts3x},
+       {}
+};
+
+MODULE_DEVICE_TABLE(i2c, sht3x_ids);
+
+/* no remove callback needed: all resources are devm-managed */
+static struct i2c_driver sht3x_i2c_driver = {
+       .driver.name = "sht3x",
+       .probe       = sht3x_probe,
+       .id_table    = sht3x_ids,
+};
+
+module_i2c_driver(sht3x_i2c_driver);
+
+MODULE_AUTHOR("David Frey <david.frey@sensirion.com>");
+MODULE_AUTHOR("Pascal Sachs <pascal.sachs@sensirion.com>");
+MODULE_DESCRIPTION("Sensirion SHT3x humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
index f1e96fd7f445bff92965c286b188a2ec253b0813..a942a2574a4d7dfaa4229f61a48d37905db2b5c3 100644 (file)
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -27,6 +24,7 @@
 #include <linux/mutex.h>
 #include <linux/device.h>
 #include <linux/jiffies.h>
+#include <linux/regmap.h>
 #include <linux/thermal.h>
 #include <linux/of.h>
 
 #define        TMP102_TLOW_REG                 0x02
 #define        TMP102_THIGH_REG                0x03
 
+#define TMP102_CONFREG_MASK    (TMP102_CONF_SD | TMP102_CONF_TM | \
+                                TMP102_CONF_POL | TMP102_CONF_F0 | \
+                                TMP102_CONF_F1 | TMP102_CONF_OS | \
+                                TMP102_CONF_EM | TMP102_CONF_AL | \
+                                TMP102_CONF_CR0 | TMP102_CONF_CR1)
+
+#define TMP102_CONFIG_CLEAR    (TMP102_CONF_SD | TMP102_CONF_OS | \
+                                TMP102_CONF_CR0)
+#define TMP102_CONFIG_SET      (TMP102_CONF_TM | TMP102_CONF_EM | \
+                                TMP102_CONF_CR1)
+
+#define CONVERSION_TIME_MS             35      /* in milli-seconds */
+
 struct tmp102 {
-       struct i2c_client *client;
-       struct device *hwmon_dev;
-       struct mutex lock;
+       struct regmap *regmap;
        u16 config_orig;
-       unsigned long last_update;
-       int temp[3];
-       bool first_time;
+       unsigned long ready_time;
 };
 
 /* convert left adjusted 13-bit TMP102 register value to milliCelsius */
@@ -72,44 +79,22 @@ static inline u16 tmp102_mC_to_reg(int val)
        return (val * 128) / 1000;
 }
 
-static const u8 tmp102_reg[] = {
-       TMP102_TEMP_REG,
-       TMP102_TLOW_REG,
-       TMP102_THIGH_REG,
-};
-
-static struct tmp102 *tmp102_update_device(struct device *dev)
-{
-       struct tmp102 *tmp102 = dev_get_drvdata(dev);
-       struct i2c_client *client = tmp102->client;
-
-       mutex_lock(&tmp102->lock);
-       if (time_after(jiffies, tmp102->last_update + HZ / 3)) {
-               int i;
-               for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) {
-                       int status = i2c_smbus_read_word_swapped(client,
-                                                                tmp102_reg[i]);
-                       if (status > -1)
-                               tmp102->temp[i] = tmp102_reg_to_mC(status);
-               }
-               tmp102->last_update = jiffies;
-               tmp102->first_time = false;
-       }
-       mutex_unlock(&tmp102->lock);
-       return tmp102;
-}
-
 static int tmp102_read_temp(void *dev, int *temp)
 {
-       struct tmp102 *tmp102 = tmp102_update_device(dev);
+       struct tmp102 *tmp102 = dev_get_drvdata(dev);
+       unsigned int reg;
+       int ret;
 
-       /* Is it too early even to return a conversion? */
-       if (tmp102->first_time) {
+       if (time_before(jiffies, tmp102->ready_time)) {
                dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
                return -EAGAIN;
        }
 
-       *temp = tmp102->temp[0];
+       ret = regmap_read(tmp102->regmap, TMP102_TEMP_REG, &reg);
+       if (ret < 0)
+               return ret;
+
+       *temp = tmp102_reg_to_mC(reg);
 
        return 0;
 }
@@ -119,13 +104,20 @@ static ssize_t tmp102_show_temp(struct device *dev,
                                char *buf)
 {
        struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
-       struct tmp102 *tmp102 = tmp102_update_device(dev);
+       struct tmp102 *tmp102 = dev_get_drvdata(dev);
+       int regaddr = sda->index;
+       unsigned int reg;
+       int err;
 
-       /* Is it too early even to return a read? */
-       if (tmp102->first_time)
+       if (regaddr == TMP102_TEMP_REG &&
+           time_before(jiffies, tmp102->ready_time))
                return -EAGAIN;
 
-       return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
+       err = regmap_read(tmp102->regmap, regaddr, &reg);
+       if (err < 0)
+               return err;
+
+       return sprintf(buf, "%d\n", tmp102_reg_to_mC(reg));
 }
 
 static ssize_t tmp102_set_temp(struct device *dev,
@@ -134,29 +126,26 @@ static ssize_t tmp102_set_temp(struct device *dev,
 {
        struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
        struct tmp102 *tmp102 = dev_get_drvdata(dev);
-       struct i2c_client *client = tmp102->client;
+       int reg = sda->index;
        long val;
-       int status;
+       int err;
 
        if (kstrtol(buf, 10, &val) < 0)
                return -EINVAL;
        val = clamp_val(val, -256000, 255000);
 
-       mutex_lock(&tmp102->lock);
-       tmp102->temp[sda->index] = val;
-       status = i2c_smbus_write_word_swapped(client, tmp102_reg[sda->index],
-                                             tmp102_mC_to_reg(val));
-       mutex_unlock(&tmp102->lock);
-       return status ? : count;
+       err = regmap_write(tmp102->regmap, reg, tmp102_mC_to_reg(val));
+       return err ? : count;
 }
 
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL , 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL,
+                         TMP102_TEMP_REG);
 
 static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp,
-                         tmp102_set_temp, 1);
+                         tmp102_set_temp, TMP102_TLOW_REG);
 
 static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp,
-                         tmp102_set_temp, 2);
+                         tmp102_set_temp, TMP102_THIGH_REG);
 
 static struct attribute *tmp102_attrs[] = {
        &sensor_dev_attr_temp1_input.dev_attr.attr,
@@ -166,20 +155,46 @@ static struct attribute *tmp102_attrs[] = {
 };
 ATTRIBUTE_GROUPS(tmp102);
 
-#define TMP102_CONFIG  (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1)
-#define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL)
-
 static const struct thermal_zone_of_device_ops tmp102_of_thermal_ops = {
        .get_temp = tmp102_read_temp,
 };
 
+/*
+ * tmp102_restore_config() - devm_add_action() teardown callback that
+ * writes the configuration register value saved at probe time back to
+ * the chip, undoing the driver's config changes on unbind.
+ */
+static void tmp102_restore_config(void *data)
+{
+       struct tmp102 *tmp102 = data;
+
+       regmap_write(tmp102->regmap, TMP102_CONF_REG, tmp102->config_orig);
+}
+
+/* regmap writeable_reg callback: only the temperature register is read-only */
+static bool tmp102_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+       return reg != TMP102_TEMP_REG;
+}
+
+/* regmap volatile_reg callback: only the temperature register bypasses the cache */
+static bool tmp102_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+       return reg == TMP102_TEMP_REG;
+}
+
+/*
+ * regmap description: 8-bit register addresses holding 16-bit
+ * big-endian values, accessed one register at a time, with an RB-tree
+ * cache for everything except the (volatile) temperature register.
+ */
+static const struct regmap_config tmp102_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 16,
+       .max_register = TMP102_THIGH_REG,
+       .writeable_reg = tmp102_is_writeable_reg,
+       .volatile_reg = tmp102_is_volatile_reg,
+       .val_format_endian = REGMAP_ENDIAN_BIG,
+       .cache_type = REGCACHE_RBTREE,
+       .use_single_rw = true,
+};
+
 static int tmp102_probe(struct i2c_client *client,
                                  const struct i2c_device_id *id)
 {
        struct device *dev = &client->dev;
        struct device *hwmon_dev;
        struct tmp102 *tmp102;
-       int status;
+       unsigned int regval;
+       int err;
 
        if (!i2c_check_functionality(client->adapter,
                                     I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -193,73 +208,57 @@ static int tmp102_probe(struct i2c_client *client,
                return -ENOMEM;
 
        i2c_set_clientdata(client, tmp102);
-       tmp102->client = client;
 
-       status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
-       if (status < 0) {
+       tmp102->regmap = devm_regmap_init_i2c(client, &tmp102_regmap_config);
+       if (IS_ERR(tmp102->regmap))
+               return PTR_ERR(tmp102->regmap);
+
+       err = regmap_read(tmp102->regmap, TMP102_CONF_REG, &regval);
+       if (err < 0) {
                dev_err(dev, "error reading config register\n");
-               return status;
+               return err;
        }
-       tmp102->config_orig = status;
-       status = i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
-                                             TMP102_CONFIG);
-       if (status < 0) {
-               dev_err(dev, "error writing config register\n");
-               goto fail_restore_config;
+
+       if ((regval & ~TMP102_CONFREG_MASK) !=
+           (TMP102_CONF_R0 | TMP102_CONF_R1)) {
+               dev_err(dev, "unexpected config register value\n");
+               return -ENODEV;
        }
-       status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
-       if (status < 0) {
-               dev_err(dev, "error reading config register\n");
-               goto fail_restore_config;
+
+       tmp102->config_orig = regval;
+
+       devm_add_action(dev, tmp102_restore_config, tmp102);
+
+       regval &= ~TMP102_CONFIG_CLEAR;
+       regval |= TMP102_CONFIG_SET;
+
+       err = regmap_write(tmp102->regmap, TMP102_CONF_REG, regval);
+       if (err < 0) {
+               dev_err(dev, "error writing config register\n");
+               return err;
        }
-       status &= ~TMP102_CONFIG_RD_ONLY;
-       if (status != TMP102_CONFIG) {
-               dev_err(dev, "config settings did not stick\n");
-               status = -ENODEV;
-               goto fail_restore_config;
+
+       tmp102->ready_time = jiffies;
+       if (tmp102->config_orig & TMP102_CONF_SD) {
+               /*
+                * Mark that we are not ready with data until the first
+                * conversion is complete
+                */
+               tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS);
        }
-       tmp102->last_update = jiffies;
-       /* Mark that we are not ready with data until conversion is complete */
-       tmp102->first_time = true;
-       mutex_init(&tmp102->lock);
 
-       hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
-                                                     tmp102, tmp102_groups);
+       hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
+                                                          tmp102,
+                                                          tmp102_groups);
        if (IS_ERR(hwmon_dev)) {
                dev_dbg(dev, "unable to register hwmon device\n");
-               status = PTR_ERR(hwmon_dev);
-               goto fail_restore_config;
+               return PTR_ERR(hwmon_dev);
        }
-       tmp102->hwmon_dev = hwmon_dev;
        devm_thermal_zone_of_sensor_register(hwmon_dev, 0, hwmon_dev,
                                             &tmp102_of_thermal_ops);
 
        dev_info(dev, "initialized\n");
 
-       return 0;
-
-fail_restore_config:
-       i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
-                                    tmp102->config_orig);
-       return status;
-}
-
-static int tmp102_remove(struct i2c_client *client)
-{
-       struct tmp102 *tmp102 = i2c_get_clientdata(client);
-
-       hwmon_device_unregister(tmp102->hwmon_dev);
-
-       /* Stop monitoring if device was stopped originally */
-       if (tmp102->config_orig & TMP102_CONF_SD) {
-               int config;
-
-               config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
-               if (config >= 0)
-                       i2c_smbus_write_word_swapped(client, TMP102_CONF_REG,
-                                                    config | TMP102_CONF_SD);
-       }
-
        return 0;
 }
 
@@ -267,27 +266,24 @@ static int tmp102_remove(struct i2c_client *client)
 static int tmp102_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       int config;
-
-       config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
-       if (config < 0)
-               return config;
+       struct tmp102 *tmp102 = i2c_get_clientdata(client);
 
-       config |= TMP102_CONF_SD;
-       return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
+       return regmap_update_bits(tmp102->regmap, TMP102_CONF_REG,
+                                 TMP102_CONF_SD, TMP102_CONF_SD);
 }
 
 static int tmp102_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       int config;
+       struct tmp102 *tmp102 = i2c_get_clientdata(client);
+       int err;
+
+       err = regmap_update_bits(tmp102->regmap, TMP102_CONF_REG,
+                                TMP102_CONF_SD, 0);
 
-       config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG);
-       if (config < 0)
-               return config;
+       tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS);
 
-       config &= ~TMP102_CONF_SD;
-       return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config);
+       return err;
 }
 #endif /* CONFIG_PM */
 
@@ -303,7 +299,6 @@ static struct i2c_driver tmp102_driver = {
        .driver.name    = DRIVER_NAME,
        .driver.pm      = &tmp102_dev_pm_ops,
        .probe          = tmp102_probe,
-       .remove         = tmp102_remove,
        .id_table       = tmp102_id,
 };
 
index ccf4cffe0ee1dfac282b9afc6e28da5340aa51f3..eeeed2c7d0816c481df22588ec0cdcf709426cd6 100644 (file)
@@ -47,7 +47,7 @@
 static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
        0x4e, 0x4f, I2C_CLIENT_END };
 
-enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
+enum chips { tmp401, tmp411, tmp431, tmp432, tmp435, tmp461 };
 
 /*
  * The TMP401 registers, note some registers have different addresses for
@@ -62,31 +62,34 @@ enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
 #define TMP401_MANUFACTURER_ID_REG             0xFE
 #define TMP401_DEVICE_ID_REG                   0xFF
 
-static const u8 TMP401_TEMP_MSB_READ[6][2] = {
+static const u8 TMP401_TEMP_MSB_READ[7][2] = {
        { 0x00, 0x01 }, /* temp */
        { 0x06, 0x08 }, /* low limit */
        { 0x05, 0x07 }, /* high limit */
        { 0x20, 0x19 }, /* therm (crit) limit */
        { 0x30, 0x34 }, /* lowest */
        { 0x32, 0x36 }, /* highest */
+       { 0, 0x11 },    /* offset */
 };
 
-static const u8 TMP401_TEMP_MSB_WRITE[6][2] = {
+static const u8 TMP401_TEMP_MSB_WRITE[7][2] = {
        { 0, 0 },       /* temp (unused) */
        { 0x0C, 0x0E }, /* low limit */
        { 0x0B, 0x0D }, /* high limit */
        { 0x20, 0x19 }, /* therm (crit) limit */
        { 0x30, 0x34 }, /* lowest */
        { 0x32, 0x36 }, /* highest */
+       { 0, 0x11 },    /* offset */
 };
 
-static const u8 TMP401_TEMP_LSB[6][2] = {
+static const u8 TMP401_TEMP_LSB[7][2] = {
        { 0x15, 0x10 }, /* temp */
        { 0x17, 0x14 }, /* low limit */
        { 0x16, 0x13 }, /* high limit */
        { 0, 0 },       /* therm (crit) limit (unused) */
        { 0x31, 0x35 }, /* lowest */
        { 0x33, 0x37 }, /* highest */
+       { 0, 0x12 },    /* offset */
 };
 
 static const u8 TMP432_TEMP_MSB_READ[4][3] = {
@@ -149,6 +152,7 @@ static const struct i2c_device_id tmp401_id[] = {
        { "tmp431", tmp431 },
        { "tmp432", tmp432 },
        { "tmp435", tmp435 },
+       { "tmp461", tmp461 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, tmp401_id);
@@ -170,7 +174,7 @@ struct tmp401_data {
        /* register values */
        u8 status[4];
        u8 config;
-       u16 temp[6][3];
+       u16 temp[7][3];
        u8 temp_crit_hyst;
 };
 
@@ -612,6 +616,22 @@ static const struct attribute_group tmp432_group = {
        .attrs = tmp432_attributes,
 };
 
+/*
+ * Additional features of the TMP461 chip.
+ * The TMP461 temperature offset for the remote channel.
+ */
+static SENSOR_DEVICE_ATTR_2(temp2_offset, S_IWUSR | S_IRUGO, show_temp,
+                           store_temp, 6, 1);
+
+static struct attribute *tmp461_attributes[] = {
+       &sensor_dev_attr_temp2_offset.dev_attr.attr,
+       NULL
+};
+
+static const struct attribute_group tmp461_group = {
+       .attrs = tmp461_attributes,
+};
+
 /*
  * Begin non sysfs callback code (aka Real code)
  */
@@ -714,7 +734,7 @@ static int tmp401_probe(struct i2c_client *client,
                        const struct i2c_device_id *id)
 {
        static const char * const names[] = {
-               "TMP401", "TMP411", "TMP431", "TMP432", "TMP435"
+               "TMP401", "TMP411", "TMP431", "TMP432", "TMP435", "TMP461"
        };
        struct device *dev = &client->dev;
        struct device *hwmon_dev;
@@ -745,6 +765,9 @@ static int tmp401_probe(struct i2c_client *client,
        if (data->kind == tmp432)
                data->groups[groups++] = &tmp432_group;
 
+       if (data->kind == tmp461)
+               data->groups[groups++] = &tmp461_group;
+
        hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
                                                           data, data->groups);
        if (IS_ERR(hwmon_dev))
index 1be543e8e42fc12b0e2e3bb5d335497a2c917b4c..6f0a51a2c6ec6df69d6761057c4747c419bffbdc 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/debugfs.h>
 #include <linux/idr.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/dma-mapping.h>
 
 #include "intel_th.h"
@@ -67,23 +68,33 @@ static int intel_th_probe(struct device *dev)
 
        hubdrv = to_intel_th_driver(hub->dev.driver);
 
+       pm_runtime_set_active(dev);
+       pm_runtime_no_callbacks(dev);
+       pm_runtime_enable(dev);
+
        ret = thdrv->probe(to_intel_th_device(dev));
        if (ret)
-               return ret;
+               goto out_pm;
 
        if (thdrv->attr_group) {
                ret = sysfs_create_group(&thdev->dev.kobj, thdrv->attr_group);
-               if (ret) {
-                       thdrv->remove(thdev);
-
-                       return ret;
-               }
+               if (ret)
+                       goto out;
        }
 
        if (thdev->type == INTEL_TH_OUTPUT &&
            !intel_th_output_assigned(thdev))
+               /* does not talk to hardware */
                ret = hubdrv->assign(hub, thdev);
 
+out:
+       if (ret)
+               thdrv->remove(thdev);
+
+out_pm:
+       if (ret)
+               pm_runtime_disable(dev);
+
        return ret;
 }
 
@@ -103,6 +114,8 @@ static int intel_th_remove(struct device *dev)
        if (thdrv->attr_group)
                sysfs_remove_group(&thdev->dev.kobj, thdrv->attr_group);
 
+       pm_runtime_get_sync(dev);
+
        thdrv->remove(thdev);
 
        if (intel_th_output_assigned(thdev)) {
@@ -110,9 +123,14 @@ static int intel_th_remove(struct device *dev)
                        to_intel_th_driver(dev->parent->driver);
 
                if (hub->dev.driver)
+                       /* does not talk to hardware */
                        hubdrv->unassign(hub, thdev);
        }
 
+       pm_runtime_disable(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
        return 0;
 }
 
@@ -185,6 +203,7 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
 {
        struct intel_th_driver *thdrv =
                to_intel_th_driver_or_null(thdev->dev.driver);
+       int ret = 0;
 
        if (!thdrv)
                return -ENODEV;
@@ -192,12 +211,17 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
        if (!try_module_get(thdrv->driver.owner))
                return -ENODEV;
 
+       pm_runtime_get_sync(&thdev->dev);
+
        if (thdrv->activate)
-               return thdrv->activate(thdev);
+               ret = thdrv->activate(thdev);
+       else
+               intel_th_trace_enable(thdev);
 
-       intel_th_trace_enable(thdev);
+       if (ret)
+               pm_runtime_put(&thdev->dev);
 
-       return 0;
+       return ret;
 }
 
 static void intel_th_output_deactivate(struct intel_th_device *thdev)
@@ -213,6 +237,7 @@ static void intel_th_output_deactivate(struct intel_th_device *thdev)
        else
                intel_th_trace_disable(thdev);
 
+       pm_runtime_put(&thdev->dev);
        module_put(thdrv->driver.owner);
 }
 
@@ -465,6 +490,38 @@ static struct intel_th_subdevice {
        },
 };
 
+#ifdef CONFIG_MODULES
+static void __intel_th_request_hub_module(struct work_struct *work)
+{
+       struct intel_th *th = container_of(work, struct intel_th,
+                                          request_module_work);
+
+       request_module("intel_th_%s", th->hub->name);
+}
+
+static int intel_th_request_hub_module(struct intel_th *th)
+{
+       INIT_WORK(&th->request_module_work, __intel_th_request_hub_module);
+       schedule_work(&th->request_module_work);
+
+       return 0;
+}
+
+static void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+       flush_work(&th->request_module_work);
+}
+#else
+static inline int intel_th_request_hub_module(struct intel_th *th)
+{
+       return -EINVAL;
+}
+
+static inline void intel_th_request_hub_module_flush(struct intel_th *th)
+{
+}
+#endif /* CONFIG_MODULES */
+
 static int intel_th_populate(struct intel_th *th, struct resource *devres,
                             unsigned int ndevres, int irq)
 {
@@ -535,7 +592,7 @@ static int intel_th_populate(struct intel_th *th, struct resource *devres,
                /* need switch driver to be loaded to enumerate the rest */
                if (subdev->type == INTEL_TH_SWITCH && !req) {
                        th->hub = thdev;
-                       err = request_module("intel_th_%s", subdev->name);
+                       err = intel_th_request_hub_module(th);
                        if (!err)
                                req++;
                }
@@ -628,6 +685,10 @@ intel_th_alloc(struct device *dev, struct resource *devres,
 
        dev_set_drvdata(dev, th);
 
+       pm_runtime_no_callbacks(dev);
+       pm_runtime_put(dev);
+       pm_runtime_allow(dev);
+
        err = intel_th_populate(th, devres, ndevres, irq);
        if (err)
                goto err_chrdev;
@@ -635,6 +696,8 @@ intel_th_alloc(struct device *dev, struct resource *devres,
        return th;
 
 err_chrdev:
+       pm_runtime_forbid(dev);
+
        __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
                            "intel_th/output");
 
@@ -652,12 +715,16 @@ void intel_th_free(struct intel_th *th)
 {
        int i;
 
+       intel_th_request_hub_module_flush(th);
        for (i = 0; i < TH_SUBDEVICE_MAX; i++)
                if (th->thdev[i] != th->hub)
                        intel_th_device_remove(th->thdev[i]);
 
        intel_th_device_remove(th->hub);
 
+       pm_runtime_get_sync(th->dev);
+       pm_runtime_forbid(th->dev);
+
        __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
                            "intel_th/output");
 
@@ -682,6 +749,7 @@ int intel_th_trace_enable(struct intel_th_device *thdev)
        if (WARN_ON_ONCE(thdev->type != INTEL_TH_OUTPUT))
                return -EINVAL;
 
+       pm_runtime_get_sync(&thdev->dev);
        hubdrv->enable(hub, &thdev->output);
 
        return 0;
@@ -702,6 +770,7 @@ int intel_th_trace_disable(struct intel_th_device *thdev)
                return -EINVAL;
 
        hubdrv->disable(hub, &thdev->output);
+       pm_runtime_put(&thdev->dev);
 
        return 0;
 }
index 9beea0b54231f720bea4570ad56a6a54665b55f6..33e09369a49165d8d7a5ca7541b15f9251eecdd7 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/bitmap.h>
+#include <linux/pm_runtime.h>
 
 #include "intel_th.h"
 #include "gth.h"
@@ -190,6 +191,11 @@ static ssize_t master_attr_store(struct device *dev,
        if (old_port >= 0) {
                gth->master[ma->master] = -1;
                clear_bit(ma->master, gth->output[old_port].master);
+
+               /*
+                * if the port is active, program this setting,
+                * implies that runtime PM is on
+                */
                if (gth->output[old_port].output->active)
                        gth_master_set(gth, ma->master, -1);
        }
@@ -204,7 +210,7 @@ static ssize_t master_attr_store(struct device *dev,
 
                set_bit(ma->master, gth->output[port].master);
 
-               /* if the port is active, program this setting */
+               /* if the port is active, program this setting, see above */
                if (gth->output[port].output->active)
                        gth_master_set(gth, ma->master, port);
        }
@@ -326,11 +332,15 @@ static ssize_t output_attr_show(struct device *dev,
        struct gth_device *gth = oa->gth;
        size_t count;
 
+       pm_runtime_get_sync(dev);
+
        spin_lock(&gth->gth_lock);
        count = snprintf(buf, PAGE_SIZE, "%x\n",
                         gth_output_parm_get(gth, oa->port, oa->parm));
        spin_unlock(&gth->gth_lock);
 
+       pm_runtime_put(dev);
+
        return count;
 }
 
@@ -346,10 +356,14 @@ static ssize_t output_attr_store(struct device *dev,
        if (kstrtouint(buf, 16, &config) < 0)
                return -EINVAL;
 
+       pm_runtime_get_sync(dev);
+
        spin_lock(&gth->gth_lock);
        gth_output_parm_set(gth, oa->port, oa->parm, config);
        spin_unlock(&gth->gth_lock);
 
+       pm_runtime_put(dev);
+
        return count;
 }
 
@@ -451,7 +465,7 @@ static int intel_th_output_attributes(struct gth_device *gth)
 }
 
 /**
- * intel_th_gth_disable() - enable tracing to an output device
+ * intel_th_gth_disable() - disable tracing to an output device
  * @thdev:     GTH device
  * @output:    output device's descriptor
  *
index 0df22e30673d9ac876d28379d2240ca80d4d21fb..4c195786bf1f21592023fbf34b82de6b5123116f 100644 (file)
@@ -114,6 +114,9 @@ intel_th_output_assigned(struct intel_th_device *thdev)
  * @unassign:  deassociate an output type device from an output port
  * @enable:    enable tracing for a given output device
  * @disable:   disable tracing for a given output device
+ * @irq:       interrupt callback
+ * @activate:  enable tracing on the output's side
+ * @deactivate:        disable tracing on the output's side
  * @fops:      file operations for device nodes
  * @attr_group:        attributes provided by the driver
  *
@@ -205,6 +208,9 @@ struct intel_th {
 
        int                     id;
        int                     major;
+#ifdef CONFIG_MODULES
+       struct work_struct      request_module_work;
+#endif /* CONFIG_MODULES */
 #ifdef CONFIG_INTEL_TH_DEBUG
        struct dentry           *dbg;
 #endif
index 5e25c7eb31d3928b70d5f9ce1bb27a79b6703d00..0bba3842336e6d6b9cb9a2b78ca91d7ff2712dcd 100644 (file)
@@ -80,6 +80,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1a8e),
                .driver_data = (kernel_ulong_t)0,
        },
+       {
+               /* Kaby Lake PCH-H */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
+               .driver_data = (kernel_ulong_t)0,
+       },
        { 0 },
 };
 
index ff31108b066f6a7e6b894bae91078639ca7e4e62..51f81d64ca37f963b770e05227c74cb14c90498a 100644 (file)
@@ -15,6 +15,7 @@
  * as defined in MIPI STPv2 specification.
  */
 
+#include <linux/pm_runtime.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -482,14 +483,40 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
                return -EFAULT;
        }
 
+       pm_runtime_get_sync(&stm->dev);
+
        count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
                          kbuf, count);
 
+       pm_runtime_mark_last_busy(&stm->dev);
+       pm_runtime_put_autosuspend(&stm->dev);
        kfree(kbuf);
 
        return count;
 }
 
+static void stm_mmap_open(struct vm_area_struct *vma)
+{
+       struct stm_file *stmf = vma->vm_file->private_data;
+       struct stm_device *stm = stmf->stm;
+
+       pm_runtime_get(&stm->dev);
+}
+
+static void stm_mmap_close(struct vm_area_struct *vma)
+{
+       struct stm_file *stmf = vma->vm_file->private_data;
+       struct stm_device *stm = stmf->stm;
+
+       pm_runtime_mark_last_busy(&stm->dev);
+       pm_runtime_put_autosuspend(&stm->dev);
+}
+
+static const struct vm_operations_struct stm_mmap_vmops = {
+       .open   = stm_mmap_open,
+       .close  = stm_mmap_close,
+};
+
 static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct stm_file *stmf = file->private_data;
@@ -514,8 +541,11 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
        if (!phys)
                return -EINVAL;
 
+       pm_runtime_get_sync(&stm->dev);
+
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_ops = &stm_mmap_vmops;
        vm_iomap_memory(vma, phys, size);
 
        return 0;
@@ -701,6 +731,17 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
        if (err)
                goto err_device;
 
+       /*
+        * Use delayed autosuspend to avoid bouncing back and forth
+        * on recurring character device writes, with the initial
+        * delay time of 2 seconds.
+        */
+       pm_runtime_no_callbacks(&stm->dev);
+       pm_runtime_use_autosuspend(&stm->dev);
+       pm_runtime_set_autosuspend_delay(&stm->dev, 2000);
+       pm_runtime_set_suspended(&stm->dev);
+       pm_runtime_enable(&stm->dev);
+
        return 0;
 
 err_device:
@@ -724,6 +765,9 @@ void stm_unregister_device(struct stm_data *stm_data)
        struct stm_source_device *src, *iter;
        int i, ret;
 
+       pm_runtime_dont_use_autosuspend(&stm->dev);
+       pm_runtime_disable(&stm->dev);
+
        mutex_lock(&stm->link_mutex);
        list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
                ret = __stm_source_link_drop(src, stm);
@@ -878,6 +922,8 @@ static int __stm_source_link_drop(struct stm_source_device *src,
 
        stm_output_free(link, &src->output);
        list_del_init(&src->link_entry);
+       pm_runtime_mark_last_busy(&link->dev);
+       pm_runtime_put_autosuspend(&link->dev);
        /* matches stm_find_device() from stm_source_link_store() */
        stm_put_device(link);
        rcu_assign_pointer(src->link, NULL);
@@ -971,8 +1017,11 @@ static ssize_t stm_source_link_store(struct device *dev,
        if (!link)
                return -EINVAL;
 
+       pm_runtime_get(&link->dev);
+
        err = stm_source_link_add(src, link);
        if (err) {
+               pm_runtime_put_autosuspend(&link->dev);
                /* matches the stm_find_device() above */
                stm_put_device(link);
        }
@@ -1033,6 +1082,9 @@ int stm_source_register_device(struct device *parent,
        if (err)
                goto err;
 
+       pm_runtime_no_callbacks(&src->dev);
+       pm_runtime_forbid(&src->dev);
+
        err = device_add(&src->dev);
        if (err)
                goto err;
index cc6439ab3f714145f4d86c49c3d3c79716a058b1..041050edd80991713d9bb5a776c9a1d402f23bf5 100644 (file)
@@ -1268,6 +1268,8 @@ static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
                }
        }
 
+       idx = 0;
+
        do {
                if (msgs[idx].len == 0) {
                        ret = -EINVAL;
index 445398c314a3303a41ba51313bbcbfd3a40c421c..b126dbaa47e37014acaf78ba528a6e177bffa9d8 100644 (file)
@@ -912,7 +912,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
        ret = tegra_i2c_init(i2c_dev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to initialize i2c controller");
-               goto unprepare_div_clk;
+               goto disable_div_clk;
        }
 
        ret = devm_request_irq(&pdev->dev, i2c_dev->irq,
index e33022e2d459f18b57027337c9c9de9243488c07..6e5fac6a5262a0824d69ebe534b18fd855d35922 100644 (file)
@@ -56,9 +56,7 @@ EXPORT_SYMBOL_GPL(__i2c_first_dynamic_bus_num);
  * The board info passed can safely be __initdata, but be careful of embedded
  * pointers (for platform_data, functions, etc) since that won't be copied.
  */
-int __init
-i2c_register_board_info(int busnum,
-       struct i2c_board_info const *info, unsigned len)
+int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned len)
 {
        int status;
 
index 26e7c5187a589b473857fd991acdd60a1bdb4609..c6a90b4a9c626dcf4fdf65e51e2f01b5b6ac4968 100644 (file)
@@ -145,7 +145,7 @@ static int i2c_mux_reg_probe_dt(struct regmux *mux,
                mux->data.idle_in_use = true;
 
        /* map address from "reg" if exists */
-       if (of_address_to_resource(np, 0, &res)) {
+       if (of_address_to_resource(np, 0, &res) == 0) {
                mux->data.reg_size = resource_size(&res);
                mux->data.reg = devm_ioremap_resource(&pdev->dev, &res);
                if (IS_ERR(mux->data.reg))
index c96649292b556f18647f1dc2a3fa075cfe176ae8..b5dd41d13d3d5986fc555ebfa0909d793e83ce8d 100644 (file)
@@ -62,6 +62,7 @@
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/mwait.h>
 #include <asm/msr.h>
 
@@ -1020,38 +1021,38 @@ static const struct idle_cpu idle_cpu_bxt = {
        { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu }
 
 static const struct x86_cpu_id intel_idle_ids[] __initconst = {
-       ICPU(0x1a, idle_cpu_nehalem),
-       ICPU(0x1e, idle_cpu_nehalem),
-       ICPU(0x1f, idle_cpu_nehalem),
-       ICPU(0x25, idle_cpu_nehalem),
-       ICPU(0x2c, idle_cpu_nehalem),
-       ICPU(0x2e, idle_cpu_nehalem),
-       ICPU(0x1c, idle_cpu_atom),
-       ICPU(0x26, idle_cpu_lincroft),
-       ICPU(0x2f, idle_cpu_nehalem),
-       ICPU(0x2a, idle_cpu_snb),
-       ICPU(0x2d, idle_cpu_snb),
-       ICPU(0x36, idle_cpu_atom),
-       ICPU(0x37, idle_cpu_byt),
-       ICPU(0x4c, idle_cpu_cht),
-       ICPU(0x3a, idle_cpu_ivb),
-       ICPU(0x3e, idle_cpu_ivt),
-       ICPU(0x3c, idle_cpu_hsw),
-       ICPU(0x3f, idle_cpu_hsw),
-       ICPU(0x45, idle_cpu_hsw),
-       ICPU(0x46, idle_cpu_hsw),
-       ICPU(0x4d, idle_cpu_avn),
-       ICPU(0x3d, idle_cpu_bdw),
-       ICPU(0x47, idle_cpu_bdw),
-       ICPU(0x4f, idle_cpu_bdw),
-       ICPU(0x56, idle_cpu_bdw),
-       ICPU(0x4e, idle_cpu_skl),
-       ICPU(0x5e, idle_cpu_skl),
-       ICPU(0x8e, idle_cpu_skl),
-       ICPU(0x9e, idle_cpu_skl),
-       ICPU(0x55, idle_cpu_skx),
-       ICPU(0x57, idle_cpu_knl),
-       ICPU(0x5c, idle_cpu_bxt),
+       ICPU(INTEL_FAM6_NEHALEM_EP,             idle_cpu_nehalem),
+       ICPU(INTEL_FAM6_NEHALEM,                idle_cpu_nehalem),
+       ICPU(INTEL_FAM6_WESTMERE2,              idle_cpu_nehalem),
+       ICPU(INTEL_FAM6_WESTMERE,               idle_cpu_nehalem),
+       ICPU(INTEL_FAM6_WESTMERE_EP,            idle_cpu_nehalem),
+       ICPU(INTEL_FAM6_NEHALEM_EX,             idle_cpu_nehalem),
+       ICPU(INTEL_FAM6_ATOM_PINEVIEW,          idle_cpu_atom),
+       ICPU(INTEL_FAM6_ATOM_LINCROFT,          idle_cpu_lincroft),
+       ICPU(INTEL_FAM6_WESTMERE_EX,            idle_cpu_nehalem),
+       ICPU(INTEL_FAM6_SANDYBRIDGE,            idle_cpu_snb),
+       ICPU(INTEL_FAM6_SANDYBRIDGE_X,          idle_cpu_snb),
+       ICPU(INTEL_FAM6_ATOM_CEDARVIEW,         idle_cpu_atom),
+       ICPU(INTEL_FAM6_ATOM_SILVERMONT1,       idle_cpu_byt),
+       ICPU(INTEL_FAM6_ATOM_AIRMONT,           idle_cpu_cht),
+       ICPU(INTEL_FAM6_IVYBRIDGE,              idle_cpu_ivb),
+       ICPU(INTEL_FAM6_IVYBRIDGE_X,            idle_cpu_ivt),
+       ICPU(INTEL_FAM6_HASWELL_CORE,           idle_cpu_hsw),
+       ICPU(INTEL_FAM6_HASWELL_X,              idle_cpu_hsw),
+       ICPU(INTEL_FAM6_HASWELL_ULT,            idle_cpu_hsw),
+       ICPU(INTEL_FAM6_HASWELL_GT3E,           idle_cpu_hsw),
+       ICPU(INTEL_FAM6_ATOM_SILVERMONT2,       idle_cpu_avn),
+       ICPU(INTEL_FAM6_BROADWELL_CORE,         idle_cpu_bdw),
+       ICPU(INTEL_FAM6_BROADWELL_GT3E,         idle_cpu_bdw),
+       ICPU(INTEL_FAM6_BROADWELL_X,            idle_cpu_bdw),
+       ICPU(INTEL_FAM6_BROADWELL_XEON_D,       idle_cpu_bdw),
+       ICPU(INTEL_FAM6_SKYLAKE_MOBILE,         idle_cpu_skl),
+       ICPU(INTEL_FAM6_SKYLAKE_DESKTOP,        idle_cpu_skl),
+       ICPU(INTEL_FAM6_KABYLAKE_MOBILE,        idle_cpu_skl),
+       ICPU(INTEL_FAM6_KABYLAKE_DESKTOP,       idle_cpu_skl),
+       ICPU(INTEL_FAM6_SKYLAKE_X,              idle_cpu_skx),
+       ICPU(INTEL_FAM6_XEON_PHI_KNL,           idle_cpu_knl),
+       ICPU(INTEL_FAM6_ATOM_GOLDMONT,          idle_cpu_bxt),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
@@ -1261,13 +1262,13 @@ static void intel_idle_state_table_update(void)
 {
        switch (boot_cpu_data.x86_model) {
 
-       case 0x3e: /* IVT */
+       case INTEL_FAM6_IVYBRIDGE_X:
                ivt_idle_state_table_update();
                break;
-       case 0x5c: /* BXT */
+       case INTEL_FAM6_ATOM_GOLDMONT:
                bxt_idle_state_table_update();
                break;
-       case 0x5e: /* SKL-H */
+       case INTEL_FAM6_SKYLAKE_DESKTOP:
                sklh_idle_state_table_update();
                break;
        }
index 505e921f0b19e701382fd820ab54a4c6525d8838..6743b18194fb0b2545639902b11654505990144e 100644 (file)
@@ -46,6 +46,14 @@ config IIO_CONSUMERS_PER_TRIGGER
        This value controls the maximum number of consumers that a
        given trigger may handle. Default is 2.
 
+config IIO_SW_DEVICE
+       tristate "Enable software IIO device support"
+       select IIO_CONFIGFS
+       help
+        Provides IIO core support for software devices. A software
+        device can be created via configfs or directly by a driver
+        using the API provided.
+
 config IIO_SW_TRIGGER
        tristate "Enable software triggers support"
        select IIO_CONFIGFS
index 20f649073462e9e3e69f872a27115ad91fe95980..87e4c4369e2f5122ba36f73f63ecbc551760533b 100644 (file)
@@ -8,6 +8,7 @@ industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o
 industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o
 
 obj-$(CONFIG_IIO_CONFIGFS) += industrialio-configfs.o
+obj-$(CONFIG_IIO_SW_DEVICE) += industrialio-sw-device.o
 obj-$(CONFIG_IIO_SW_TRIGGER) += industrialio-sw-trigger.o
 obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
 
index e4a758cd7d3544eebfcca6cfbcb9481736e920a6..89d78208de3f30d6396162a208eb0f29460087b7 100644 (file)
@@ -17,6 +17,16 @@ config BMA180
          To compile this driver as a module, choose M here: the
          module will be called bma180.
 
+config BMA220
+    tristate "Bosch BMA220 3-Axis Accelerometer Driver"
+       depends on SPI
+    help
+      Say yes here to add support for the Bosch BMA220 triaxial
+      acceleration sensor.
+
+      To compile this driver as a module, choose M here: the
+      module will be called bma220_spi.
+
 config BMC150_ACCEL
        tristate "Bosch BMC150 Accelerometer Driver"
        select IIO_BUFFER
@@ -136,13 +146,23 @@ config MMA7455_SPI
          To compile this driver as a module, choose M here: the module
          will be called mma7455_spi.
 
+config MMA7660
+       tristate "Freescale MMA7660FC 3-Axis Accelerometer Driver"
+       depends on I2C
+       help
+         Say yes here to get support for the Freescale MMA7660FC 3-Axis
+         accelerometer.
+
+         Choosing M will build the driver as a module. If so, the module
+         will be called mma7660.
+
 config MMA8452
-       tristate "Freescale MMA8452Q and similar Accelerometers Driver"
+       tristate "Freescale / NXP MMA8452Q and similar Accelerometers Driver"
        depends on I2C
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
-         Say yes here to build support for the following Freescale 3-axis
+         Say yes here to build support for the following Freescale / NXP 3-axis
          accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC,
          FXLS8471Q.
 
index 71b6794de8858f967a1c8256a3dcd8b33e44e8cd..6cedbecca2eed6db31a527dcdfaa4891e2c4dc00 100644 (file)
@@ -4,6 +4,7 @@
 
 # When adding new entries keep the list in alphabetical order
 obj-$(CONFIG_BMA180) += bma180.o
+obj-$(CONFIG_BMA220) += bma220_spi.o
 obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
 obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
 obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
@@ -15,6 +16,8 @@ obj-$(CONFIG_MMA7455)         += mma7455_core.o
 obj-$(CONFIG_MMA7455_I2C)      += mma7455_i2c.o
 obj-$(CONFIG_MMA7455_SPI)      += mma7455_spi.o
 
+obj-$(CONFIG_MMA7660)  += mma7660.o
+
 obj-$(CONFIG_MMA8452)  += mma8452.o
 
 obj-$(CONFIG_MMA9551_CORE)     += mma9551_core.o
index f04b88406995175bc5159a8f300295e33ec3df80..e3f88ba5faf33ff4b7179fb96f603d20875f77a6 100644 (file)
@@ -654,7 +654,7 @@ static irqreturn_t bma180_trigger_handler(int irq, void *p)
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct bma180_data *data = iio_priv(indio_dev);
-       int64_t time_ns = iio_get_time_ns();
+       s64 time_ns = iio_get_time_ns(indio_dev);
        int bit, ret, i = 0;
 
        mutex_lock(&data->mutex);
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
new file mode 100644 (file)
index 0000000..1098d10
--- /dev/null
@@ -0,0 +1,338 @@
+/**
+ * BMA220 Digital triaxial acceleration sensor driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/spi/spi.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define BMA220_REG_ID                          0x00
+#define BMA220_REG_ACCEL_X                     0x02
+#define BMA220_REG_ACCEL_Y                     0x03
+#define BMA220_REG_ACCEL_Z                     0x04
+#define BMA220_REG_RANGE                       0x11
+#define BMA220_REG_SUSPEND                     0x18
+
+#define BMA220_CHIP_ID                         0xDD
+#define BMA220_READ_MASK                       0x80
+#define BMA220_RANGE_MASK                      0x03
+#define BMA220_DATA_SHIFT                      2
+#define BMA220_SUSPEND_SLEEP                   0xFF
+#define BMA220_SUSPEND_WAKE                    0x00
+
+#define BMA220_DEVICE_NAME                     "bma220"
+#define BMA220_SCALE_AVAILABLE                 "0.623 1.248 2.491 4.983"
+
+#define BMA220_ACCEL_CHANNEL(index, reg, axis) {                       \
+       .type = IIO_ACCEL,                                              \
+       .address = reg,                                                 \
+       .modified = 1,                                                  \
+       .channel2 = IIO_MOD_##axis,                                     \
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),                   \
+       .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),           \
+       .scan_index = index,                                            \
+       .scan_type = {                                                  \
+               .sign = 's',                                            \
+               .realbits = 6,                                          \
+               .storagebits = 8,                                       \
+               .shift = BMA220_DATA_SHIFT,                             \
+               .endianness = IIO_CPU,                                  \
+       },                                                              \
+}
+
+enum bma220_axis {
+       AXIS_X,
+       AXIS_Y,
+       AXIS_Z,
+};
+
+static IIO_CONST_ATTR(in_accel_scale_available, BMA220_SCALE_AVAILABLE);
+
+static struct attribute *bma220_attributes[] = {
+       &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+       NULL,
+};
+
+static const struct attribute_group bma220_attribute_group = {
+       .attrs = bma220_attributes,
+};
+
+static const int bma220_scale_table[][4] = {
+       {0, 623000}, {1, 248000}, {2, 491000}, {4, 983000}
+};
+
+struct bma220_data {
+       struct spi_device *spi_device;
+       struct mutex lock;
+       s8 buffer[16]; /* 3x8-bit channels + 5x8 padding + 8x8 timestamp */
+       u8 tx_buf[2] ____cacheline_aligned;
+};
+
+static const struct iio_chan_spec bma220_channels[] = {
+       BMA220_ACCEL_CHANNEL(0, BMA220_REG_ACCEL_X, X),
+       BMA220_ACCEL_CHANNEL(1, BMA220_REG_ACCEL_Y, Y),
+       BMA220_ACCEL_CHANNEL(2, BMA220_REG_ACCEL_Z, Z),
+       IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static inline int bma220_read_reg(struct spi_device *spi, u8 reg)
+{
+       return spi_w8r8(spi, reg | BMA220_READ_MASK);
+}
+
+static const unsigned long bma220_accel_scan_masks[] = {
+       BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
+       0
+};
+
+static irqreturn_t bma220_trigger_handler(int irq, void *p)
+{
+       int ret;
+       struct iio_poll_func *pf = p;
+       struct iio_dev *indio_dev = pf->indio_dev;
+       struct bma220_data *data = iio_priv(indio_dev);
+       struct spi_device *spi = data->spi_device;
+
+       mutex_lock(&data->lock);
+       data->tx_buf[0] = BMA220_REG_ACCEL_X | BMA220_READ_MASK;
+       ret = spi_write_then_read(spi, data->tx_buf, 1, data->buffer,
+                                 ARRAY_SIZE(bma220_channels) - 1);
+       if (ret < 0)
+               goto err;
+
+       iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+                                          pf->timestamp);
+err:
+       mutex_unlock(&data->lock);
+       iio_trigger_notify_done(indio_dev->trig);
+
+       return IRQ_HANDLED;
+}
+
+static int bma220_read_raw(struct iio_dev *indio_dev,
+                          struct iio_chan_spec const *chan,
+                          int *val, int *val2, long mask)
+{
+       int ret;
+       u8 range_idx;
+       struct bma220_data *data = iio_priv(indio_dev);
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               ret = bma220_read_reg(data->spi_device, chan->address);
+               if (ret < 0)
+                       return -EINVAL;
+               *val = sign_extend32(ret >> BMA220_DATA_SHIFT, 5);
+               return IIO_VAL_INT;
+       case IIO_CHAN_INFO_SCALE:
+               ret = bma220_read_reg(data->spi_device, BMA220_REG_RANGE);
+               if (ret < 0)
+                       return ret;
+               range_idx = ret & BMA220_RANGE_MASK;
+               *val = bma220_scale_table[range_idx][0];
+               *val2 = bma220_scale_table[range_idx][1];
+               return IIO_VAL_INT_PLUS_MICRO;
+       }
+
+       return -EINVAL;
+}
+
+static int bma220_write_raw(struct iio_dev *indio_dev,
+                           struct iio_chan_spec const *chan,
+                           int val, int val2, long mask)
+{
+       int i;
+       int ret;
+       int index = -1;
+       struct bma220_data *data = iio_priv(indio_dev);
+
+       switch (mask) {
+       case IIO_CHAN_INFO_SCALE:
+               for (i = 0; i < ARRAY_SIZE(bma220_scale_table); i++)
+                       if (val == bma220_scale_table[i][0] &&
+                           val2 == bma220_scale_table[i][1]) {
+                               index = i;
+                               break;
+                       }
+               if (index < 0)
+                       return -EINVAL;
+
+               mutex_lock(&data->lock);
+               data->tx_buf[0] = BMA220_REG_RANGE;
+               data->tx_buf[1] = index;
+               ret = spi_write(data->spi_device, data->tx_buf,
+                               sizeof(data->tx_buf));
+               if (ret < 0)
+                       dev_err(&data->spi_device->dev,
+                               "failed to set measurement range\n");
+               mutex_unlock(&data->lock);
+
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static const struct iio_info bma220_info = {
+       .driver_module          = THIS_MODULE,
+       .read_raw               = bma220_read_raw,
+       .write_raw              = bma220_write_raw,
+       .attrs                  = &bma220_attribute_group,
+};
+
+static int bma220_init(struct spi_device *spi)
+{
+       int ret;
+
+       ret = bma220_read_reg(spi, BMA220_REG_ID);
+       if (ret != BMA220_CHIP_ID)
+               return -ENODEV;
+
+       /* Make sure the chip is powered on */
+       ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
+       if (ret < 0)
+               return ret;
+       else if (ret == BMA220_SUSPEND_WAKE)
+               return bma220_read_reg(spi, BMA220_REG_SUSPEND);
+
+       return 0;
+}
+
+static int bma220_deinit(struct spi_device *spi)
+{
+       int ret;
+
+       /* Make sure the chip is powered off */
+       ret = bma220_read_reg(spi, BMA220_REG_SUSPEND);
+       if (ret < 0)
+               return ret;
+       else if (ret == BMA220_SUSPEND_SLEEP)
+               return bma220_read_reg(spi, BMA220_REG_SUSPEND);
+
+       return 0;
+}
+
+static int bma220_probe(struct spi_device *spi)
+{
+       int ret;
+       struct iio_dev *indio_dev;
+       struct bma220_data *data;
+
+       indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
+       if (!indio_dev) {
+               dev_err(&spi->dev, "iio allocation failed!\n");
+               return -ENOMEM;
+       }
+
+       data = iio_priv(indio_dev);
+       data->spi_device = spi;
+       spi_set_drvdata(spi, indio_dev);
+       mutex_init(&data->lock);
+
+       indio_dev->dev.parent = &spi->dev;
+       indio_dev->info = &bma220_info;
+       indio_dev->name = BMA220_DEVICE_NAME;
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->channels = bma220_channels;
+       indio_dev->num_channels = ARRAY_SIZE(bma220_channels);
+       indio_dev->available_scan_masks = bma220_accel_scan_masks;
+
+       ret = bma220_init(data->spi_device);
+       if (ret < 0)
+               return ret;
+
+       ret = iio_triggered_buffer_setup(indio_dev, NULL,
+                                        bma220_trigger_handler, NULL);
+       if (ret < 0) {
+               dev_err(&spi->dev, "iio triggered buffer setup failed\n");
+               goto err_suspend;
+       }
+
+       ret = iio_device_register(indio_dev);
+       if (ret < 0) {
+               dev_err(&spi->dev, "iio_device_register failed\n");
+               iio_triggered_buffer_cleanup(indio_dev);
+               goto err_suspend;
+       }
+
+       return 0;
+
+err_suspend:
+       return bma220_deinit(spi);
+}
+
+static int bma220_remove(struct spi_device *spi)
+{
+       struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+       iio_device_unregister(indio_dev);
+       iio_triggered_buffer_cleanup(indio_dev);
+
+       return bma220_deinit(spi);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bma220_suspend(struct device *dev)
+{
+       struct bma220_data *data =
+                       iio_priv(spi_get_drvdata(to_spi_device(dev)));
+
+       /* The chip can be suspended/woken up by a simple register read. */
+       return bma220_read_reg(data->spi_device, BMA220_REG_SUSPEND);
+}
+
+static int bma220_resume(struct device *dev)
+{
+       struct bma220_data *data =
+                       iio_priv(spi_get_drvdata(to_spi_device(dev)));
+
+       return bma220_read_reg(data->spi_device, BMA220_REG_SUSPEND);
+}
+
+static SIMPLE_DEV_PM_OPS(bma220_pm_ops, bma220_suspend, bma220_resume);
+
+#define BMA220_PM_OPS (&bma220_pm_ops)
+#else
+#define BMA220_PM_OPS NULL
+#endif
+
+static const struct spi_device_id bma220_spi_id[] = {
+       {"bma220", 0},
+       {}
+};
+
+static const struct acpi_device_id bma220_acpi_id[] = {
+       {"BMA0220", 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(spi, bma220_spi_id);
+
+static struct spi_driver bma220_driver = {
+       .driver = {
+               .name = "bma220_spi",
+               .pm = BMA220_PM_OPS,
+               .acpi_match_table = ACPI_PTR(bma220_acpi_id),
+       },
+       .probe =            bma220_probe,
+       .remove =           bma220_remove,
+       .id_table =         bma220_spi_id,
+};
+
+module_spi_driver(bma220_driver);
+
+MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
+MODULE_DESCRIPTION("BMA220 acceleration sensor driver");
+MODULE_LICENSE("GPL v2");
index 197e693e7e7b795e9002e10430fd3f41399f96a7..bf17aae6614535b0ab3d68d3bfd84aec653a2f45 100644 (file)
@@ -901,7 +901,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
         */
        if (!irq) {
                data->old_timestamp = data->timestamp;
-               data->timestamp = iio_get_time_ns();
+               data->timestamp = iio_get_time_ns(indio_dev);
        }
 
        /*
@@ -1303,7 +1303,7 @@ static irqreturn_t bmc150_accel_irq_handler(int irq, void *private)
        int i;
 
        data->old_timestamp = data->timestamp;
-       data->timestamp = iio_get_time_ns();
+       data->timestamp = iio_get_time_ns(indio_dev);
 
        for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
                if (data->triggers[i].enabled) {
index bfe219a8bea244619b2e5ed9602998527328d856..765a72362dc61e37d32ad7cf75e805fcba1cc8a1 100644 (file)
@@ -1129,7 +1129,7 @@ static irqreturn_t kxcjk1013_data_rdy_trig_poll(int irq, void *private)
        struct iio_dev *indio_dev = private;
        struct kxcjk1013_data *data = iio_priv(indio_dev);
 
-       data->timestamp = iio_get_time_ns();
+       data->timestamp = iio_get_time_ns(indio_dev);
 
        if (data->dready_trigger_on)
                iio_trigger_poll(data->dready_trig);
index c902f54c23f57fee5ee587b8d701ae818f591785..6551085bedd75818741a48725d9df14fa17d1a34 100644 (file)
@@ -97,7 +97,8 @@ static irqreturn_t mma7455_trigger_handler(int irq, void *p)
        if (ret)
                goto done;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, buf,
+                                          iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
new file mode 100644 (file)
index 0000000..0acdee5
--- /dev/null
@@ -0,0 +1,277 @@
+/**
+ * Freescale MMA7660FC 3-Axis Accelerometer
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for Freescale MMA7660FC; 7-bit I2C address: 0x4c.
+ */
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define MMA7660_DRIVER_NAME    "mma7660"
+
+#define MMA7660_REG_XOUT       0x00
+#define MMA7660_REG_YOUT       0x01
+#define MMA7660_REG_ZOUT       0x02
+#define MMA7660_REG_OUT_BIT_ALERT      BIT(6)
+
+#define MMA7660_REG_MODE       0x07
+#define MMA7660_REG_MODE_BIT_MODE      BIT(0)
+#define MMA7660_REG_MODE_BIT_TON       BIT(2)
+
+#define MMA7660_I2C_READ_RETRIES       5
+
+/*
+ * The accelerometer has one measurement range:
+ *
+ * -1.5g - +1.5g (6-bit, signed)
+ *
+ * scale = (1.5 + 1.5) * 9.81 / (2^6 - 1)      = 0.467142857
+ */
+
+#define MMA7660_SCALE_AVAIL    "0.467142857"
+
+/* File-local scale (nano m/s^2 per LSB); static keeps it out of the
+ * kernel's global symbol namespace.
+ */
+static const int mma7660_nscale = 467142857;
+
+/* One IIO channel per axis; .address carries the output register. */
+#define MMA7660_CHANNEL(reg, axis) {   \
+       .type = IIO_ACCEL,      \
+       .address = reg, \
+       .modified = 1,  \
+       .channel2 = IIO_MOD_##axis,     \
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),   \
+       .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),   \
+}
+
+static const struct iio_chan_spec mma7660_channels[] = {
+       MMA7660_CHANNEL(MMA7660_REG_XOUT, X),
+       MMA7660_CHANNEL(MMA7660_REG_YOUT, Y),
+       MMA7660_CHANNEL(MMA7660_REG_ZOUT, Z),
+};
+
+enum mma7660_mode {
+       MMA7660_MODE_STANDBY,
+       MMA7660_MODE_ACTIVE
+};
+
+struct mma7660_data {
+       struct i2c_client *client;
+       /* serializes register access from mma7660_read_raw() */
+       struct mutex lock;
+       enum mma7660_mode mode;
+};
+
+static IIO_CONST_ATTR(in_accel_scale_available, MMA7660_SCALE_AVAIL);
+
+static struct attribute *mma7660_attributes[] = {
+       &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+       NULL,
+};
+
+static const struct attribute_group mma7660_attribute_group = {
+       .attrs = mma7660_attributes
+};
+
+/*
+ * Switch the chip between standby and active mode by read-modify-
+ * writing the MODE register; the TON bit is cleared in both branches.
+ * No-op if the requested mode is already current.  Returns 0 on
+ * success or a negative errno.
+ */
+static int mma7660_set_mode(struct mma7660_data *data,
+                               enum mma7660_mode mode)
+{
+       int ret;
+       struct i2c_client *client = data->client;
+
+       if (mode == data->mode)
+               return 0;
+
+       ret = i2c_smbus_read_byte_data(client, MMA7660_REG_MODE);
+       if (ret < 0) {
+               dev_err(&client->dev, "failed to read sensor mode\n");
+               return ret;
+       }
+
+       if (mode == MMA7660_MODE_ACTIVE) {
+               ret &= ~MMA7660_REG_MODE_BIT_TON;
+               ret |= MMA7660_REG_MODE_BIT_MODE;
+       } else {
+               ret &= ~MMA7660_REG_MODE_BIT_TON;
+               ret &= ~MMA7660_REG_MODE_BIT_MODE;
+       }
+
+       /* ret now holds the new register value to write back */
+       ret = i2c_smbus_write_byte_data(client, MMA7660_REG_MODE, ret);
+       if (ret < 0) {
+               dev_err(&client->dev, "failed to change sensor mode\n");
+               return ret;
+       }
+
+       /* Cache the mode only after the write succeeded. */
+       data->mode = mode;
+
+       return ret;
+}
+
+/*
+ * Read one axis output register, retrying while the Alert bit is set.
+ * Returns the raw byte (>= 0), a negative errno from I2C, or
+ * -ETIMEDOUT when every retry still carried the Alert bit.
+ */
+static int mma7660_read_accel(struct mma7660_data *data, u8 address)
+{
+       int ret, retries = MMA7660_I2C_READ_RETRIES;
+       struct i2c_client *client = data->client;
+
+       /*
+        * Read data. If the Alert bit is set, the register was read at
+        * the same time as the device was attempting to update the content.
+        * The solution is to read the register again. Do this only
+        * MMA7660_I2C_READ_RETRIES times to avoid spending too much time
+        * in the kernel.
+        */
+       do {
+               ret = i2c_smbus_read_byte_data(client, address);
+               if (ret < 0) {
+                       dev_err(&client->dev, "register read failed\n");
+                       return ret;
+               }
+       } while (retries-- > 0 && ret & MMA7660_REG_OUT_BIT_ALERT);
+
+       if (ret & MMA7660_REG_OUT_BIT_ALERT) {
+               dev_err(&client->dev, "all register read retries failed\n");
+               return -ETIMEDOUT;
+       }
+
+       return ret;
+}
+
+/*
+ * IIO read_raw callback: raw axis samples (6-bit two's complement) and
+ * the fixed scale.  Returns an IIO_VAL_* code or a negative errno.
+ */
+static int mma7660_read_raw(struct iio_dev *indio_dev,
+                               struct iio_chan_spec const *chan,
+                               int *val, int *val2, long mask)
+{
+       struct mma7660_data *data = iio_priv(indio_dev);
+       int ret;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               mutex_lock(&data->lock);
+               ret = mma7660_read_accel(data, chan->address);
+               mutex_unlock(&data->lock);
+               if (ret < 0)
+                       return ret;
+               /* Output registers are 6-bit signed values. */
+               *val = sign_extend32(ret, 5);
+               return IIO_VAL_INT;
+       case IIO_CHAN_INFO_SCALE:
+               *val = 0;
+               *val2 = mma7660_nscale;
+               return IIO_VAL_INT_PLUS_NANO;
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct iio_info mma7660_info = {
+       .driver_module  = THIS_MODULE,
+       .read_raw               = mma7660_read_raw,
+       .attrs                  = &mma7660_attribute_group,
+};
+
+static int mma7660_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       int ret;
+       struct iio_dev *indio_dev;
+       struct mma7660_data *data;
+
+       /* Device-managed allocation: freed automatically on detach, so
+        * the error paths below need no explicit free.
+        */
+       indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+       if (!indio_dev) {
+               dev_err(&client->dev, "iio allocation failed!\n");
+               return -ENOMEM;
+       }
+
+       data = iio_priv(indio_dev);
+       data->client = client;
+       i2c_set_clientdata(client, indio_dev);
+       mutex_init(&data->lock);
+       data->mode = MMA7660_MODE_STANDBY;
+
+       indio_dev->dev.parent = &client->dev;
+       indio_dev->info = &mma7660_info;
+       indio_dev->name = MMA7660_DRIVER_NAME;
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->channels = mma7660_channels;
+       indio_dev->num_channels = ARRAY_SIZE(mma7660_channels);
+
+       /* Activate measurements before exposing the device to userspace. */
+       ret = mma7660_set_mode(data, MMA7660_MODE_ACTIVE);
+       if (ret < 0)
+               return ret;
+
+       ret = iio_device_register(indio_dev);
+       if (ret < 0) {
+               dev_err(&client->dev, "device_register failed\n");
+               /* Undo the mode change; allocation is devm-managed. */
+               mma7660_set_mode(data, MMA7660_MODE_STANDBY);
+       }
+
+       return ret;
+}
+
+static int mma7660_remove(struct i2c_client *client)
+{
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+       /* Unregister first, then put the chip back into standby mode. */
+       iio_device_unregister(indio_dev);
+
+       return mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mma7660_suspend(struct device *dev)
+{
+       struct mma7660_data *data;
+
+       data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+       /* Standby stops measurements while the system sleeps. */
+       return mma7660_set_mode(data, MMA7660_MODE_STANDBY);
+}
+
+static int mma7660_resume(struct device *dev)
+{
+       struct mma7660_data *data;
+
+       data = iio_priv(i2c_get_clientdata(to_i2c_client(dev)));
+
+       return mma7660_set_mode(data, MMA7660_MODE_ACTIVE);
+}
+
+static SIMPLE_DEV_PM_OPS(mma7660_pm_ops, mma7660_suspend, mma7660_resume);
+
+#define MMA7660_PM_OPS (&mma7660_pm_ops)
+#else
+#define MMA7660_PM_OPS NULL
+#endif
+
+static const struct i2c_device_id mma7660_i2c_id[] = {
+       {"mma7660", 0},
+       {}
+};
+
+/* Export the I2C id table as well, so the module autoloads when the
+ * device is instantiated by name rather than through ACPI.
+ */
+MODULE_DEVICE_TABLE(i2c, mma7660_i2c_id);
+
+static const struct acpi_device_id mma7660_acpi_id[] = {
+       {"MMA7660", 0},
+       {}
+};
+
+MODULE_DEVICE_TABLE(acpi, mma7660_acpi_id);
+
+static struct i2c_driver mma7660_driver = {
+       .driver = {
+               .name = "mma7660",
+               .pm = MMA7660_PM_OPS,
+               .acpi_match_table = ACPI_PTR(mma7660_acpi_id),
+       },
+       .probe          = mma7660_probe,
+       .remove         = mma7660_remove,
+       .id_table       = mma7660_i2c_id,
+};
+
+MODULE_AUTHOR("Constantin Musca <constantin.musca@intel.com>");
+MODULE_DESCRIPTION("Freescale MMA7660FC 3-Axis Accelerometer driver");
+MODULE_LICENSE("GPL v2");
index e225d3c53bd51c11cb84746837c4b6e05668fd60..d41e1b588e68f4b582561c395c24b77a85bef5bb 100644 (file)
@@ -1,22 +1,22 @@
 /*
- * mma8452.c - Support for following Freescale 3-axis accelerometers:
+ * mma8452.c - Support for following Freescale / NXP 3-axis accelerometers:
  *
- * MMA8451Q (14 bit)
- * MMA8452Q (12 bit)
- * MMA8453Q (10 bit)
- * MMA8652FC (12 bit)
- * MMA8653FC (10 bit)
- * FXLS8471Q (14 bit)
+ * device name digital output  7-bit I2C slave address (pin selectable)
+ * ---------------------------------------------------------------------
+ * MMA8451Q    14 bit          0x1c / 0x1d
+ * MMA8452Q    12 bit          0x1c / 0x1d
+ * MMA8453Q    10 bit          0x1c / 0x1d
+ * MMA8652FC   12 bit          0x1d
+ * MMA8653FC   10 bit          0x1d
+ * FXLS8471Q   14 bit          0x1e / 0x1d / 0x1c / 0x1f
  *
- * Copyright 2015 Martin Kepplinger <martin.kepplinger@theobroma-systems.com>
+ * Copyright 2015 Martin Kepplinger <martink@posteo.de>
  * Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
  *
  * This file is subject to the terms and conditions of version 2 of
  * the GNU General Public License.  See the file COPYING in the main
  * directory of this archive for more details.
  *
- * 7-bit I2C slave address 0x1c/0x1d (pin selectable)
- *
  * TODO: orientation events
  */
 
@@ -76,6 +76,8 @@
 #define  MMA8452_CTRL_DR_DEFAULT               0x4 /* 50 Hz sample frequency */
 #define MMA8452_CTRL_REG2                      0x2b
 #define  MMA8452_CTRL_REG2_RST                 BIT(6)
+#define  MMA8452_CTRL_REG2_MODS_SHIFT          3
+#define  MMA8452_CTRL_REG2_MODS_MASK           0x1b
 #define MMA8452_CTRL_REG4                      0x2d
 #define MMA8452_CTRL_REG5                      0x2e
 #define MMA8452_OFF_X                          0x2f
@@ -106,7 +108,7 @@ struct mma8452_data {
 };
 
 /**
- * struct mma_chip_info - chip specific data for Freescale's accelerometers
+ * struct mma_chip_info - chip specific data
  * @chip_id:                   WHO_AM_I register's value
  * @channels:                  struct iio_chan_spec matching the device's
  *                             capabilities
@@ -257,20 +259,17 @@ static const int mma8452_samp_freq[8][2] = {
        {6, 250000}, {1, 560000}
 };
 
-/* Datasheet table 35  (step time vs sample frequency) */
-static const int mma8452_transient_time_step_us[8] = {
-       1250,
-       2500,
-       5000,
-       10000,
-       20000,
-       20000,
-       20000,
-       20000
+/* Datasheet table: step time "Relationship with the ODR" (sample frequency) */
+static const int mma8452_transient_time_step_us[4][8] = {
+       { 1250, 2500, 5000, 10000, 20000, 20000, 20000, 20000 },  /* normal */
+       { 1250, 2500, 5000, 10000, 20000, 80000, 80000, 80000 },  /* l p l n */
+       { 1250, 2500, 2500, 2500, 2500, 2500, 2500, 2500 },       /* high res*/
+       { 1250, 2500, 5000, 10000, 20000, 80000, 160000, 160000 } /* l p */
 };
 
-/* Datasheet table 18 (normal mode) */
-static const int mma8452_hp_filter_cutoff[8][4][2] = {
+/* Datasheet table "High-Pass Filter Cutoff Options" */
+static const int mma8452_hp_filter_cutoff[4][8][4][2] = {
+       { /* normal */
        { {16, 0}, {8, 0}, {4, 0}, {2, 0} },            /* 800 Hz sample */
        { {16, 0}, {8, 0}, {4, 0}, {2, 0} },            /* 400 Hz sample */
        { {8, 0}, {4, 0}, {2, 0}, {1, 0} },             /* 200 Hz sample */
@@ -279,8 +278,61 @@ static const int mma8452_hp_filter_cutoff[8][4][2] = {
        { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} },   /* 12.5 Hz sample */
        { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} },   /* 6.25 Hz sample */
        { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} }    /* 1.56 Hz sample */
+       },
+       { /* low noise low power */
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+       { {8, 0}, {4, 0}, {2, 0}, {1, 0} },
+       { {4, 0}, {2, 0}, {1, 0}, {0, 500000} },
+       { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} },
+       { {0, 500000}, {0, 250000}, {0, 125000}, {0, 063000} },
+       { {0, 500000}, {0, 250000}, {0, 125000}, {0, 063000} },
+       { {0, 500000}, {0, 250000}, {0, 125000}, {0, 063000} }
+       },
+       { /* high resolution */
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} }
+       },
+       { /* low power */
+       { {16, 0}, {8, 0}, {4, 0}, {2, 0} },
+       { {8, 0}, {4, 0}, {2, 0}, {1, 0} },
+       { {4, 0}, {2, 0}, {1, 0}, {0, 500000} },
+       { {2, 0}, {1, 0}, {0, 500000}, {0, 250000} },
+       { {1, 0}, {0, 500000}, {0, 250000}, {0, 125000} },
+       { {0, 250000}, {0, 125000}, {0, 063000}, {0, 031000} },
+       { {0, 250000}, {0, 125000}, {0, 063000}, {0, 031000} },
+       { {0, 250000}, {0, 125000}, {0, 063000}, {0, 031000} }
+       }
 };
 
+/* Datasheet table "MODS Oversampling modes averaging values at each ODR" */
+static const u16 mma8452_os_ratio[4][8] = {
+       /* 800 Hz, 400 Hz, ... , 1.56 Hz */
+       { 2, 4, 4, 4, 4, 16, 32, 128 },         /* normal */
+       { 2, 4, 4, 4, 4, 4, 8, 32 },            /* low power low noise */
+       { 2, 4, 8, 16, 32, 128, 256, 1024 },    /* high resolution */
+       { 2, 2, 2, 2, 2, 2, 4, 16 }             /* low power */
+};
+
+/* Return the MODS (oversampling mode) field of CTRL_REG2 — used as the
+ * first index into the power-mode tables above — or a negative errno.
+ */
+static int mma8452_get_power_mode(struct mma8452_data *data)
+{
+       int reg;
+
+       reg = i2c_smbus_read_byte_data(data->client,
+                                      MMA8452_CTRL_REG2);
+       if (reg < 0)
+               return reg;
+
+       return ((reg & MMA8452_CTRL_REG2_MODS_MASK) >>
+               MMA8452_CTRL_REG2_MODS_SHIFT);
+}
+
 static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
@@ -303,13 +355,42 @@ static ssize_t mma8452_show_scale_avail(struct device *dev,
 static ssize_t mma8452_show_hp_cutoff_avail(struct device *dev,
                                             struct device_attribute *attr,
                                             char *buf)
+{
+       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       struct mma8452_data *data = iio_priv(indio_dev);
+       int i, j;
+
+       /* Available cutoffs depend on both the current ODR (i) and the
+        * current power mode (j).
+        */
+       i = mma8452_get_odr_index(data);
+       j = mma8452_get_power_mode(data);
+       if (j < 0)
+               return j;
+
+       return mma8452_show_int_plus_micros(buf, mma8452_hp_filter_cutoff[j][i],
+               ARRAY_SIZE(mma8452_hp_filter_cutoff[0][0]));
+}
+
+static ssize_t mma8452_show_os_ratio_avail(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
 {
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct mma8452_data *data = iio_priv(indio_dev);
        int i = mma8452_get_odr_index(data);
+       int j;
+       u16 val = 0;
+       size_t len = 0;
 
-       return mma8452_show_int_plus_micros(buf, mma8452_hp_filter_cutoff[i],
-               ARRAY_SIZE(mma8452_hp_filter_cutoff[0]));
+       for (j = 0; j < ARRAY_SIZE(mma8452_os_ratio); j++) {
+               if (val == mma8452_os_ratio[j][i])
+                       continue;
+
+               val = mma8452_os_ratio[j][i];
+
+               len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", val);
+       }
+       buf[len - 1] = '\n';
+
+       return len;
 }
 
 static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(mma8452_show_samp_freq_avail);
@@ -317,6 +398,8 @@ static IIO_DEVICE_ATTR(in_accel_scale_available, S_IRUGO,
                       mma8452_show_scale_avail, NULL, 0);
 static IIO_DEVICE_ATTR(in_accel_filter_high_pass_3db_frequency_available,
                       S_IRUGO, mma8452_show_hp_cutoff_avail, NULL, 0);
+static IIO_DEVICE_ATTR(in_accel_oversampling_ratio_available, S_IRUGO,
+                      mma8452_show_os_ratio_avail, NULL, 0);
 
 static int mma8452_get_samp_freq_index(struct mma8452_data *data,
                                       int val, int val2)
@@ -335,24 +418,33 @@ static int mma8452_get_scale_index(struct mma8452_data *data, int val, int val2)
 static int mma8452_get_hp_filter_index(struct mma8452_data *data,
                                       int val, int val2)
 {
-       int i = mma8452_get_odr_index(data);
+       int i, j;
+
+       i = mma8452_get_odr_index(data);
+       j = mma8452_get_power_mode(data);
+       if (j < 0)
+               return j;
 
-       return mma8452_get_int_plus_micros_index(mma8452_hp_filter_cutoff[i],
-               ARRAY_SIZE(mma8452_hp_filter_cutoff[0]), val, val2);
+       return mma8452_get_int_plus_micros_index(mma8452_hp_filter_cutoff[j][i],
+               ARRAY_SIZE(mma8452_hp_filter_cutoff[0][0]), val, val2);
 }
 
 static int mma8452_read_hp_filter(struct mma8452_data *data, int *hz, int *uHz)
 {
-       int i, ret;
+       int j, i, ret;
 
        ret = i2c_smbus_read_byte_data(data->client, MMA8452_HP_FILTER_CUTOFF);
        if (ret < 0)
                return ret;
 
        i = mma8452_get_odr_index(data);
+       j = mma8452_get_power_mode(data);
+       if (j < 0)
+               return j;
+
        ret &= MMA8452_HP_FILTER_CUTOFF_SEL_MASK;
-       *hz = mma8452_hp_filter_cutoff[i][ret][0];
-       *uHz = mma8452_hp_filter_cutoff[i][ret][1];
+       *hz = mma8452_hp_filter_cutoff[j][i][ret][0];
+       *uHz = mma8452_hp_filter_cutoff[j][i][ret][1];
 
        return 0;
 }
@@ -414,6 +506,15 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
                }
 
                return IIO_VAL_INT_PLUS_MICRO;
+       case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+               ret = mma8452_get_power_mode(data);
+               if (ret < 0)
+                       return ret;
+
+               i = mma8452_get_odr_index(data);
+
+               *val = mma8452_os_ratio[ret][i];
+               return IIO_VAL_INT;
        }
 
        return -EINVAL;
@@ -480,6 +581,21 @@ fail:
        return ret;
 }
 
+/* Program the MODS oversampling-mode bits of CTRL_REG2 via a
+ * read-modify-write; the actual write goes through
+ * mma8452_change_config() (defined elsewhere in this file).
+ */
+static int mma8452_set_power_mode(struct mma8452_data *data, u8 mode)
+{
+       int reg;
+
+       reg = i2c_smbus_read_byte_data(data->client,
+                                      MMA8452_CTRL_REG2);
+       if (reg < 0)
+               return reg;
+
+       reg &= ~MMA8452_CTRL_REG2_MODS_MASK;
+       reg |= mode << MMA8452_CTRL_REG2_MODS_SHIFT;
+
+       return mma8452_change_config(data, MMA8452_CTRL_REG2, reg);
+}
+
 /* returns >0 if in freefall mode, 0 if not or <0 if an error occurred */
 static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
 {
@@ -518,11 +634,7 @@ static int mma8452_set_freefall_mode(struct mma8452_data *data, bool state)
                val |= MMA8452_FF_MT_CFG_OAE;
        }
 
-       val = mma8452_change_config(data, chip->ev_cfg, val);
-       if (val)
-               return val;
-
-       return 0;
+       return mma8452_change_config(data, chip->ev_cfg, val);
 }
 
 static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
@@ -597,6 +709,14 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
                return mma8452_change_config(data, MMA8452_DATA_CFG,
                                             data->data_cfg);
 
+       case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+               ret = mma8452_get_odr_index(data);
+
+               for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
+                       if (mma8452_os_ratio[i][ret] == val)
+                               return mma8452_set_power_mode(data, i);
+               }
+
        default:
                return -EINVAL;
        }
@@ -610,7 +730,7 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
                               int *val, int *val2)
 {
        struct mma8452_data *data = iio_priv(indio_dev);
-       int ret, us;
+       int ret, us, power_mode;
 
        switch (info) {
        case IIO_EV_INFO_VALUE:
@@ -629,7 +749,11 @@ static int mma8452_read_thresh(struct iio_dev *indio_dev,
                if (ret < 0)
                        return ret;
 
-               us = ret * mma8452_transient_time_step_us[
+               power_mode = mma8452_get_power_mode(data);
+               if (power_mode < 0)
+                       return power_mode;
+
+               us = ret * mma8452_transient_time_step_us[power_mode][
                                mma8452_get_odr_index(data)];
                *val = us / USEC_PER_SEC;
                *val2 = us % USEC_PER_SEC;
@@ -677,8 +801,12 @@ static int mma8452_write_thresh(struct iio_dev *indio_dev,
                                             val);
 
        case IIO_EV_INFO_PERIOD:
+               ret = mma8452_get_power_mode(data);
+               if (ret < 0)
+                       return ret;
+
                steps = (val * USEC_PER_SEC + val2) /
-                               mma8452_transient_time_step_us[
+                               mma8452_transient_time_step_us[ret][
                                        mma8452_get_odr_index(data)];
 
                if (steps < 0 || steps > 0xff)
@@ -785,7 +913,7 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
 static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
 {
        struct mma8452_data *data = iio_priv(indio_dev);
-       s64 ts = iio_get_time_ns();
+       s64 ts = iio_get_time_ns(indio_dev);
        int src;
 
        src = i2c_smbus_read_byte_data(data->client, data->chip_info->ev_src);
@@ -865,7 +993,7 @@ static irqreturn_t mma8452_trigger_handler(int irq, void *p)
                goto done;
 
        iio_push_to_buffers_with_timestamp(indio_dev, buffer,
-                                          iio_get_time_ns());
+                                          iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
@@ -978,7 +1106,8 @@ static struct attribute_group mma8452_event_attribute_group = {
                              BIT(IIO_CHAN_INFO_CALIBBIAS), \
        .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
                        BIT(IIO_CHAN_INFO_SCALE) | \
-                       BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY), \
+                       BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY) | \
+                       BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
        .scan_index = idx, \
        .scan_type = { \
                .sign = 's', \
@@ -998,7 +1127,8 @@ static struct attribute_group mma8452_event_attribute_group = {
        .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
                BIT(IIO_CHAN_INFO_CALIBBIAS), \
        .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
-               BIT(IIO_CHAN_INFO_SCALE), \
+               BIT(IIO_CHAN_INFO_SCALE) | \
+               BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
        .scan_index = idx, \
        .scan_type = { \
                .sign = 's', \
@@ -1171,6 +1301,7 @@ static struct attribute *mma8452_attributes[] = {
        &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
        &iio_dev_attr_in_accel_scale_available.dev_attr.attr,
        &iio_dev_attr_in_accel_filter_high_pass_3db_frequency_available.dev_attr.attr,
+       &iio_dev_attr_in_accel_oversampling_ratio_available.dev_attr.attr,
        NULL
 };
 
@@ -1444,8 +1575,8 @@ static int mma8452_probe(struct i2c_client *client,
                goto buffer_cleanup;
 
        ret = mma8452_set_freefall_mode(data, false);
-       if (ret)
-               return ret;
+       if (ret < 0)
+               goto buffer_cleanup;
 
        return 0;
 
@@ -1558,5 +1689,5 @@ static struct i2c_driver mma8452_driver = {
 module_i2c_driver(mma8452_driver);
 
 MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
-MODULE_DESCRIPTION("Freescale MMA8452 accelerometer driver");
+MODULE_DESCRIPTION("Freescale / NXP MMA8452 accelerometer driver");
 MODULE_LICENSE("GPL");
index d899a4d4307f9dbb9abf011281874ad629a6e026..bf2704435629b56f94a35dc7db37bcdd2732c108 100644 (file)
@@ -391,7 +391,7 @@ static irqreturn_t mma9551_event_handler(int irq, void *private)
        iio_push_event(indio_dev,
                       IIO_MOD_EVENT_CODE(IIO_INCLI, 0, (mma_axis + 1),
                                          IIO_EV_TYPE_ROC, IIO_EV_DIR_RISING),
-                      iio_get_time_ns());
+                      iio_get_time_ns(indio_dev));
 
 out:
        mutex_unlock(&data->mutex);
index bb05f3efddca04b381e552be34b5465e19167d0b..36bf19733be0bed4fb5eb0fad810c2cf84ab5d8c 100644 (file)
@@ -1001,7 +1001,7 @@ static irqreturn_t mma9553_irq_handler(int irq, void *private)
        struct iio_dev *indio_dev = private;
        struct mma9553_data *data = iio_priv(indio_dev);
 
-       data->timestamp = iio_get_time_ns();
+       data->timestamp = iio_get_time_ns(indio_dev);
        /*
         * Since we only configure the interrupt pin when an
         * event is enabled, we are sure we have at least
index 57f83a67948cc0758d8210e64b4a7a8f4513e9c7..f8dfdb6905632b3e23d2074eb8f0d993f5447ed9 100644 (file)
@@ -29,6 +29,7 @@
 #define LSM330_ACCEL_DEV_NAME          "lsm330_accel"
 #define LSM303AGR_ACCEL_DEV_NAME       "lsm303agr_accel"
 #define LIS2DH12_ACCEL_DEV_NAME                "lis2dh12_accel"
+#define LIS3L02DQ_ACCEL_DEV_NAME       "lis3l02dq"
 
 /**
 * struct st_sensors_platform_data - default accel platform data
index 4d95bfc4786cafc6de2fcc659950860596193877..da3fb069ec5c06dc2bdd99868ee1ad4e114b471a 100644 (file)
 #define ST_ACCEL_6_IHL_IRQ_MASK                        0x80
 #define ST_ACCEL_6_MULTIREAD_BIT               true
 
+/* CUSTOM VALUES FOR SENSOR 7 */
+#define ST_ACCEL_7_ODR_ADDR                    0x20
+#define ST_ACCEL_7_ODR_MASK                    0x30
+#define ST_ACCEL_7_ODR_AVL_280HZ_VAL           0x00
+#define ST_ACCEL_7_ODR_AVL_560HZ_VAL           0x01
+#define ST_ACCEL_7_ODR_AVL_1120HZ_VAL          0x02
+#define ST_ACCEL_7_ODR_AVL_4480HZ_VAL          0x03
+#define ST_ACCEL_7_PW_ADDR                     0x20
+#define ST_ACCEL_7_PW_MASK                     0xc0
+#define ST_ACCEL_7_FS_AVL_2_GAIN               IIO_G_TO_M_S_2(488)
+#define ST_ACCEL_7_BDU_ADDR                    0x21
+#define ST_ACCEL_7_BDU_MASK                    0x40
+#define ST_ACCEL_7_DRDY_IRQ_ADDR               0x21
+#define ST_ACCEL_7_DRDY_IRQ_INT1_MASK          0x04
+#define ST_ACCEL_7_MULTIREAD_BIT               false
+
 static const struct iio_chan_spec st_accel_8bit_channels[] = {
        ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
                        BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -662,6 +678,54 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
                .multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT,
                .bootime = 2,
        },
+       {
+               /* No WAI register present */
+               .sensors_supported = {
+                       [0] = LIS3L02DQ_ACCEL_DEV_NAME,
+               },
+               .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+               .odr = {
+                       .addr = ST_ACCEL_7_ODR_ADDR,
+                       .mask = ST_ACCEL_7_ODR_MASK,
+                       .odr_avl = {
+                               { 280, ST_ACCEL_7_ODR_AVL_280HZ_VAL, },
+                               { 560, ST_ACCEL_7_ODR_AVL_560HZ_VAL, },
+                               { 1120, ST_ACCEL_7_ODR_AVL_1120HZ_VAL, },
+                               { 4480, ST_ACCEL_7_ODR_AVL_4480HZ_VAL, },
+                       },
+               },
+               .pw = {
+                       .addr = ST_ACCEL_7_PW_ADDR,
+                       .mask = ST_ACCEL_7_PW_MASK,
+                       .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+                       .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+               },
+               .enable_axis = {
+                       .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+                       .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+               },
+               .fs = {
+                       .fs_avl = {
+                               [0] = {
+                                       .num = ST_ACCEL_FS_AVL_2G,
+                                       .gain = ST_ACCEL_7_FS_AVL_2_GAIN,
+                               },
+                       },
+               },
+               /*
+                * The part has a BDU bit but if set the data is never
+                * updated so don't set it.
+                */
+               .bdu = {
+               },
+               .drdy_irq = {
+                       .addr = ST_ACCEL_7_DRDY_IRQ_ADDR,
+                       .mask_int1 = ST_ACCEL_7_DRDY_IRQ_INT1_MASK,
+                       .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+               },
+               .multi_read_bit = ST_ACCEL_7_MULTIREAD_BIT,
+               .bootime = 2,
+       },
 };
 
 static int st_accel_read_raw(struct iio_dev *indio_dev,
@@ -758,13 +822,15 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
        indio_dev->info = &accel_info;
        mutex_init(&adata->tb.buf_lock);
 
-       st_sensors_power_enable(indio_dev);
+       err = st_sensors_power_enable(indio_dev);
+       if (err)
+               return err;
 
        err = st_sensors_check_device_support(indio_dev,
                                        ARRAY_SIZE(st_accel_sensors_settings),
                                        st_accel_sensors_settings);
        if (err < 0)
-               return err;
+               goto st_accel_power_off;
 
        adata->num_data_channels = ST_ACCEL_NUMBER_DATA_CHANNELS;
        adata->multiread_bit = adata->sensor_settings->multi_read_bit;
@@ -781,11 +847,11 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
 
        err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data);
        if (err < 0)
-               return err;
+               goto st_accel_power_off;
 
        err = st_accel_allocate_ring(indio_dev);
        if (err < 0)
-               return err;
+               goto st_accel_power_off;
 
        if (irq > 0) {
                err = st_sensors_allocate_trigger(indio_dev,
@@ -808,6 +874,8 @@ st_accel_device_register_error:
                st_sensors_deallocate_trigger(indio_dev);
 st_accel_probe_trigger_error:
        st_accel_deallocate_ring(indio_dev);
+st_accel_power_off:
+       st_sensors_power_disable(indio_dev);
 
        return err;
 }
index 7333ee9fb11ba8ac4e2f37a2dce111a73df7bfc6..e9d427a5df7ca506e771e487d966c9b5427604bc 100644 (file)
@@ -80,6 +80,10 @@ static const struct of_device_id st_accel_of_match[] = {
                .compatible = "st,h3lis331dl-accel",
                .data = H3LIS331DL_DRIVER_NAME,
        },
+       {
+               .compatible = "st,lis3l02dq",
+               .data = LIS3L02DQ_ACCEL_DEV_NAME,
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -130,6 +134,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
        { LSM330_ACCEL_DEV_NAME },
        { LSM303AGR_ACCEL_DEV_NAME },
        { LIS2DH12_ACCEL_DEV_NAME },
+       { LIS3L02DQ_ACCEL_DEV_NAME },
        {},
 };
 MODULE_DEVICE_TABLE(i2c, st_accel_id_table);
index fcd5847a3fd34a11531e2db3ae6ed0365cd71bd5..efd43941d45d772e1802d27569c35806708d2a9c 100644 (file)
@@ -59,6 +59,7 @@ static const struct spi_device_id st_accel_id_table[] = {
        { LSM330_ACCEL_DEV_NAME },
        { LSM303AGR_ACCEL_DEV_NAME },
        { LIS2DH12_ACCEL_DEV_NAME },
+       { LIS3L02DQ_ACCEL_DEV_NAME },
        {},
 };
 MODULE_DEVICE_TABLE(spi, st_accel_id_table);
index 25378c5882e2af347ed091b9f68b3900a4645fad..1de31bdd4ce4e2f12cda1c3e9c597df5c323038e 100644 (file)
@@ -153,6 +153,18 @@ config AXP288_ADC
          To compile this driver as a module, choose M here: the module will be
          called axp288_adc.
 
+config BCM_IPROC_ADC
+       tristate "Broadcom IPROC ADC driver"
+       depends on ARCH_BCM_IPROC || COMPILE_TEST
+       depends on MFD_SYSCON
+       default ARCH_BCM_CYGNUS
+       help
+         Say Y here if you want to add support for the Broadcom iProc
+         ADC driver.
+
+         Broadcom iProc ADC driver. Broadcom iProc ADC controller has 8
+         channels. The driver allows the user to read voltage values.
+
 config BERLIN2_ADC
        tristate "Marvell Berlin2 ADC driver"
        depends on ARCH_BERLIN
index 38638d46f972aece64f87f773fd28fb39856ce62..0ba0d500eedbb4c0b1a2ac8bbef3e4a23741cb35 100644 (file)
@@ -16,6 +16,7 @@ obj-$(CONFIG_AD799X) += ad799x.o
 obj-$(CONFIG_AT91_ADC) += at91_adc.o
 obj-$(CONFIG_AT91_SAMA5D2_ADC) += at91-sama5d2_adc.o
 obj-$(CONFIG_AXP288_ADC) += axp288_adc.o
+obj-$(CONFIG_BCM_IPROC_ADC) += bcm_iproc_adc.o
 obj-$(CONFIG_BERLIN2_ADC) += berlin2-adc.o
 obj-$(CONFIG_CC10001_ADC) += cc10001_adc.o
 obj-$(CONFIG_DA9150_GPADC) += da9150-gpadc.o
index 2123f0ac2e2a47b608eedc6095f1681802edfcd9..c0f6a98fd9bdb83805fa2d8eae199c7808544bf8 100644 (file)
@@ -154,12 +154,11 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
 
        switch (m) {
        case IIO_CHAN_INFO_RAW:
-               if (iio_buffer_enabled(indio_dev))
-                       return -EBUSY;
-
-               ret = ad7266_read_single(st, val, chan->address);
+               ret = iio_device_claim_direct_mode(indio_dev);
                if (ret)
                        return ret;
+               ret = ad7266_read_single(st, val, chan->address);
+               iio_device_release_direct_mode(indio_dev);
 
                *val = (*val >> 2) & 0xfff;
                if (chan->scan_type.sign == 's')
@@ -441,6 +440,7 @@ static int ad7266_probe(struct spi_device *spi)
        st->spi = spi;
 
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &ad7266_info;
index c0eabf156702a7d4da6a26a477d8cbc50e62c3cb..1d90b02732bbea7b31e394e20d298c89b146e9c2 100644 (file)
@@ -115,7 +115,7 @@ static irqreturn_t ad7291_event_handler(int irq, void *private)
        u16 t_status, v_status;
        u16 command;
        int i;
-       s64 timestamp = iio_get_time_ns();
+       s64 timestamp = iio_get_time_ns(indio_dev);
 
        if (ad7291_i2c_read(chip, AD7291_T_ALERT_STATUS, &t_status))
                return IRQ_HANDLED;
@@ -505,6 +505,7 @@ static int ad7291_probe(struct i2c_client *client,
        indio_dev->num_channels = ARRAY_SIZE(ad7291_channels);
 
        indio_dev->dev.parent = &client->dev;
+       indio_dev->dev.of_node = client->dev.of_node;
        indio_dev->info = &ad7291_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
 
index 62bb8f7ce4a0f93e0d657a584d56cd3feea75bd7..10ec8fce395fc974dc9befc7cafc1900e9bf1a8b 100644 (file)
@@ -163,7 +163,7 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
                goto done;
 
        iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
-               iio_get_time_ns());
+               iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
@@ -315,6 +315,7 @@ static int ad7298_probe(struct spi_device *spi)
 
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = ad7298_channels;
        indio_dev->num_channels = ARRAY_SIZE(ad7298_channels);
index be85c2a0ad97feeedd9b0a4ac4d61548d236c57b..b7ecf9aab90fa56ecf3e51584ce24c4bc423211b 100644 (file)
@@ -70,7 +70,7 @@ static irqreturn_t ad7476_trigger_handler(int irq, void  *p)
                goto done;
 
        iio_push_to_buffers_with_timestamp(indio_dev, st->data,
-               iio_get_time_ns());
+               iio_get_time_ns(indio_dev));
 done:
        iio_trigger_notify_done(indio_dev->trig);
 
@@ -106,12 +106,11 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
 
        switch (m) {
        case IIO_CHAN_INFO_RAW:
-               mutex_lock(&indio_dev->mlock);
-               if (iio_buffer_enabled(indio_dev))
-                       ret = -EBUSY;
-               else
-                       ret = ad7476_scan_direct(st);
-               mutex_unlock(&indio_dev->mlock);
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
+               ret = ad7476_scan_direct(st);
+               iio_device_release_direct_mode(indio_dev);
 
                if (ret < 0)
                        return ret;
@@ -228,6 +227,7 @@ static int ad7476_probe(struct spi_device *spi)
 
        /* Establish that the iio_dev is a child of the spi device */
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = st->chip_info->channel;
index cf172d58cd44ce18691c7219d2d1c1935265d25b..1817ebf5ad8416b3c071c6ad97783d063c248680 100644 (file)
@@ -272,30 +272,22 @@ static ssize_t ad7791_write_frequency(struct device *dev,
        struct ad7791_state *st = iio_priv(indio_dev);
        int i, ret;
 
-       mutex_lock(&indio_dev->mlock);
-       if (iio_buffer_enabled(indio_dev)) {
-               mutex_unlock(&indio_dev->mlock);
-               return -EBUSY;
-       }
-       mutex_unlock(&indio_dev->mlock);
-
-       ret = -EINVAL;
-
-       for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++) {
-               if (sysfs_streq(ad7791_sample_freq_avail[i], buf)) {
-
-                       mutex_lock(&indio_dev->mlock);
-                       st->filter &= ~AD7791_FILTER_RATE_MASK;
-                       st->filter |= i;
-                       ad_sd_write_reg(&st->sd, AD7791_REG_FILTER,
-                                        sizeof(st->filter), st->filter);
-                       mutex_unlock(&indio_dev->mlock);
-                       ret = 0;
+       for (i = 0; i < ARRAY_SIZE(ad7791_sample_freq_avail); i++)
+               if (sysfs_streq(ad7791_sample_freq_avail[i], buf))
                        break;
-               }
-       }
+       if (i == ARRAY_SIZE(ad7791_sample_freq_avail))
+               return -EINVAL;
+
+       ret = iio_device_claim_direct_mode(indio_dev);
+       if (ret)
+               return ret;
+       st->filter &= ~AD7791_FILTER_RATE_MASK;
+       st->filter |= i;
+       ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter),
+                       st->filter);
+       iio_device_release_direct_mode(indio_dev);
 
-       return ret ? ret : len;
+       return len;
 }
 
 static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
@@ -383,6 +375,7 @@ static int ad7791_probe(struct spi_device *spi)
        spi_set_drvdata(spi, indio_dev);
 
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = st->info->channels;
index 7b07bb651671c902082bb9a95640e14fd763a965..847789bae821c6870f64ecc504c5b78d79558cdd 100644 (file)
@@ -369,13 +369,6 @@ static ssize_t ad7793_write_frequency(struct device *dev,
        long lval;
        int i, ret;
 
-       mutex_lock(&indio_dev->mlock);
-       if (iio_buffer_enabled(indio_dev)) {
-               mutex_unlock(&indio_dev->mlock);
-               return -EBUSY;
-       }
-       mutex_unlock(&indio_dev->mlock);
-
        ret = kstrtol(buf, 10, &lval);
        if (ret)
                return ret;
@@ -383,20 +376,21 @@ static ssize_t ad7793_write_frequency(struct device *dev,
        if (lval == 0)
                return -EINVAL;
 
-       ret = -EINVAL;
-
        for (i = 0; i < 16; i++)
-               if (lval == st->chip_info->sample_freq_avail[i]) {
-                       mutex_lock(&indio_dev->mlock);
-                       st->mode &= ~AD7793_MODE_RATE(-1);
-                       st->mode |= AD7793_MODE_RATE(i);
-                       ad_sd_write_reg(&st->sd, AD7793_REG_MODE,
-                                        sizeof(st->mode), st->mode);
-                       mutex_unlock(&indio_dev->mlock);
-                       ret = 0;
-               }
+               if (lval == st->chip_info->sample_freq_avail[i])
+                       break;
+       if (i == 16)
+               return -EINVAL;
 
-       return ret ? ret : len;
+       ret = iio_device_claim_direct_mode(indio_dev);
+       if (ret)
+               return ret;
+       st->mode &= ~AD7793_MODE_RATE(-1);
+       st->mode |= AD7793_MODE_RATE(i);
+       ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode), st->mode);
+       iio_device_release_direct_mode(indio_dev);
+
+       return len;
 }
 
 static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
@@ -790,6 +784,7 @@ static int ad7793_probe(struct spi_device *spi)
        spi_set_drvdata(spi, indio_dev);
 
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = st->chip_info->channels;
index 2d3c397e66ad4f8ca1785657e0e7159734680b65..7a483bfbd70cc66947588a42f10d585b7eea519b 100644 (file)
@@ -122,7 +122,7 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
                goto done;
 
        iio_push_to_buffers_with_timestamp(indio_dev, st->data,
-               iio_get_time_ns());
+               iio_get_time_ns(indio_dev));
 done:
        iio_trigger_notify_done(indio_dev->trig);
 
@@ -156,12 +156,11 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
 
        switch (m) {
        case IIO_CHAN_INFO_RAW:
-               mutex_lock(&indio_dev->mlock);
-               if (iio_buffer_enabled(indio_dev))
-                       ret = -EBUSY;
-               else
-                       ret = ad7887_scan_direct(st, chan->address);
-               mutex_unlock(&indio_dev->mlock);
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
+               ret = ad7887_scan_direct(st, chan->address);
+               iio_device_release_direct_mode(indio_dev);
 
                if (ret < 0)
                        return ret;
@@ -265,6 +264,7 @@ static int ad7887_probe(struct spi_device *spi)
 
        /* Estabilish that the iio_dev is a child of the spi device */
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->info = &ad7887_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
index 45e29ccd824f25f9c6d167dc3aeadef3dc542e43..77a675e11ebb0ccdc4e64e6c527786c3b67dbd15 100644 (file)
@@ -181,7 +181,7 @@ static irqreturn_t ad7923_trigger_handler(int irq, void *p)
                goto done;
 
        iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
-               iio_get_time_ns());
+               iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
@@ -233,12 +233,11 @@ static int ad7923_read_raw(struct iio_dev *indio_dev,
 
        switch (m) {
        case IIO_CHAN_INFO_RAW:
-               mutex_lock(&indio_dev->mlock);
-               if (iio_buffer_enabled(indio_dev))
-                       ret = -EBUSY;
-               else
-                       ret = ad7923_scan_direct(st, chan->address);
-               mutex_unlock(&indio_dev->mlock);
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
+               ret = ad7923_scan_direct(st, chan->address);
+               iio_device_release_direct_mode(indio_dev);
 
                if (ret < 0)
                        return ret;
@@ -289,6 +288,7 @@ static int ad7923_probe(struct spi_device *spi)
 
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = info->channels;
        indio_dev->num_channels = info->num_channels;
index a3f5254f4e512d1f11e4be9c27b3333095757704..b6163764489c677a2ec45dd5536464734ac641c6 100644 (file)
@@ -212,7 +212,7 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
                goto out;
 
        iio_push_to_buffers_with_timestamp(indio_dev, st->rx_buf,
-                       iio_get_time_ns());
+                       iio_get_time_ns(indio_dev));
 out:
        iio_trigger_notify_done(indio_dev->trig);
 
@@ -282,12 +282,11 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
 
        switch (m) {
        case IIO_CHAN_INFO_RAW:
-               mutex_lock(&indio_dev->mlock);
-               if (iio_buffer_enabled(indio_dev))
-                       ret = -EBUSY;
-               else
-                       ret = ad799x_scan_direct(st, chan->scan_index);
-               mutex_unlock(&indio_dev->mlock);
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
+               ret = ad799x_scan_direct(st, chan->scan_index);
+               iio_device_release_direct_mode(indio_dev);
 
                if (ret < 0)
                        return ret;
@@ -395,11 +394,9 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev,
        struct ad799x_state *st = iio_priv(indio_dev);
        int ret;
 
-       mutex_lock(&indio_dev->mlock);
-       if (iio_buffer_enabled(indio_dev)) {
-               ret = -EBUSY;
-               goto done;
-       }
+       ret = iio_device_claim_direct_mode(indio_dev);
+       if (ret)
+               return ret;
 
        if (state)
                st->config |= BIT(chan->scan_index) << AD799X_CHANNEL_SHIFT;
@@ -412,10 +409,7 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev,
                st->config &= ~AD7998_ALERT_EN;
 
        ret = ad799x_write_config(st, st->config);
-
-done:
-       mutex_unlock(&indio_dev->mlock);
-
+       iio_device_release_direct_mode(indio_dev);
        return ret;
 }
 
@@ -508,7 +502,7 @@ static irqreturn_t ad799x_event_handler(int irq, void *private)
                                                            (i >> 1),
                                                            IIO_EV_TYPE_THRESH,
                                                            IIO_EV_DIR_FALLING),
-                                      iio_get_time_ns());
+                                      iio_get_time_ns(indio_dev));
        }
 
 done:
@@ -812,6 +806,7 @@ static int ad799x_probe(struct i2c_client *client,
        st->client = client;
 
        indio_dev->dev.parent = &client->dev;
+       indio_dev->dev.of_node = client->dev.of_node;
        indio_dev->name = id->name;
        indio_dev->info = st->chip_config->info;
 
diff --git a/drivers/iio/adc/bcm_iproc_adc.c b/drivers/iio/adc/bcm_iproc_adc.c
new file mode 100644 (file)
index 0000000..21d38c8
--- /dev/null
@@ -0,0 +1,644 @@
+/*
+ * Copyright 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation (the "GPL").
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 (GPLv2) for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 (GPLv2) along with this source code.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <linux/iio/iio.h>
+
+/* Below Register's are common to IPROC ADC and Touchscreen IP */
+#define IPROC_REGCTL1                  0x00
+#define IPROC_REGCTL2                  0x04
+#define IPROC_INTERRUPT_THRES          0x08
+#define IPROC_INTERRUPT_MASK           0x0c
+#define IPROC_INTERRUPT_STATUS         0x10
+#define IPROC_ANALOG_CONTROL           0x1c
+#define IPROC_CONTROLLER_STATUS                0x14
+#define IPROC_AUX_DATA                 0x20
+#define IPROC_SOFT_BYPASS_CONTROL      0x38
+#define IPROC_SOFT_BYPASS_DATA         0x3C
+
+/* IPROC ADC Channel register offsets */
+#define IPROC_ADC_CHANNEL_REGCTL1              0x800
+#define IPROC_ADC_CHANNEL_REGCTL2              0x804
+#define IPROC_ADC_CHANNEL_STATUS               0x808
+#define IPROC_ADC_CHANNEL_INTERRUPT_STATUS     0x80c
+#define IPROC_ADC_CHANNEL_INTERRUPT_MASK       0x810
+#define IPROC_ADC_CHANNEL_DATA                 0x814
+#define IPROC_ADC_CHANNEL_OFFSET               0x20
+
+/* Bit definitions for IPROC_REGCTL2 */
+#define IPROC_ADC_AUXIN_SCAN_ENA       BIT(0)
+#define IPROC_ADC_PWR_LDO              BIT(5)
+#define IPROC_ADC_PWR_ADC              BIT(4)
+#define IPROC_ADC_PWR_BG               BIT(3)
+#define IPROC_ADC_CONTROLLER_EN                BIT(17)
+
+/* Bit definitions for IPROC_INTERRUPT_MASK and IPROC_INTERRUPT_STATUS */
+#define IPROC_ADC_AUXDATA_RDY_INTR     BIT(3)
+#define IPROC_ADC_INTR                 9
+#define IPROC_ADC_INTR_MASK            (0xFF << IPROC_ADC_INTR)
+
+/* Bit definitions for IPROC_ANALOG_CONTROL */
+#define IPROC_ADC_CHANNEL_SEL          11
+#define IPROC_ADC_CHANNEL_SEL_MASK     (0x7 << IPROC_ADC_CHANNEL_SEL)
+
+/* Bit definitions for IPROC_ADC_CHANNEL_REGCTL1 */
+#define IPROC_ADC_CHANNEL_ROUNDS       0x2
+#define IPROC_ADC_CHANNEL_ROUNDS_MASK  (0x3F << IPROC_ADC_CHANNEL_ROUNDS)
+#define IPROC_ADC_CHANNEL_MODE         0x1
+#define IPROC_ADC_CHANNEL_MODE_MASK    (0x1 << IPROC_ADC_CHANNEL_MODE)
+#define IPROC_ADC_CHANNEL_MODE_TDM     0x1
+#define IPROC_ADC_CHANNEL_MODE_SNAPSHOT 0x0
+#define IPROC_ADC_CHANNEL_ENABLE       0x0
+#define IPROC_ADC_CHANNEL_ENABLE_MASK  0x1
+
+/* Bit definitions for IPROC_ADC_CHANNEL_REGCTL2 */
+#define IPROC_ADC_CHANNEL_WATERMARK    0x0
+#define IPROC_ADC_CHANNEL_WATERMARK_MASK \
+               (0x3F << IPROC_ADC_CHANNEL_WATERMARK)
+
+#define IPROC_ADC_WATER_MARK_LEVEL     0x1
+
+/* Bit definitions for IPROC_ADC_CHANNEL_STATUS */
+#define IPROC_ADC_CHANNEL_DATA_LOST            0x0
+#define IPROC_ADC_CHANNEL_DATA_LOST_MASK       \
+               (0x0 << IPROC_ADC_CHANNEL_DATA_LOST)
+#define IPROC_ADC_CHANNEL_VALID_ENTERIES       0x1
+#define IPROC_ADC_CHANNEL_VALID_ENTERIES_MASK  \
+               (0xFF << IPROC_ADC_CHANNEL_VALID_ENTERIES)
+#define IPROC_ADC_CHANNEL_TOTAL_ENTERIES       0x9
+#define IPROC_ADC_CHANNEL_TOTAL_ENTERIES_MASK  \
+               (0xFF << IPROC_ADC_CHANNEL_TOTAL_ENTERIES)
+
+/* Bit definitions for IPROC_ADC_CHANNEL_INTERRUPT_MASK */
+#define IPROC_ADC_CHANNEL_WTRMRK_INTR                  0x0
+#define IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK             \
+               (0x1 << IPROC_ADC_CHANNEL_WTRMRK_INTR)
+#define IPROC_ADC_CHANNEL_FULL_INTR                    0x1
+#define IPROC_ADC_CHANNEL_FULL_INTR_MASK               \
+               (0x1 << IPROC_ADC_CHANNEL_FULL_INTR)
+#define IPROC_ADC_CHANNEL_EMPTY_INTR                   0x2
+#define IPROC_ADC_CHANNEL_EMPTY_INTR_MASK              \
+               (0x1 << IPROC_ADC_CHANNEL_EMPTY_INTR)
+
+#define IPROC_ADC_WATER_MARK_INTR_ENABLE               0x1
+
+/* Number of time to retry a set of the interrupt mask reg */
+#define IPROC_ADC_INTMASK_RETRY_ATTEMPTS               10
+
+#define IPROC_ADC_READ_TIMEOUT        (HZ*2)
+
+#define iproc_adc_dbg_reg(dev, priv, reg) \
+do { \
+       u32 val; \
+       regmap_read(priv->regmap, reg, &val); \
+       dev_dbg(dev, "%20s= 0x%08x\n", #reg, val); \
+} while (0)
+
+struct iproc_adc_priv {
+       struct regmap *regmap;
+       struct clk *adc_clk;
+       struct mutex mutex;
+       int  irqno;
+       int chan_val;
+       int chan_id;
+       struct completion completion;
+};
+
+static void iproc_adc_reg_dump(struct iio_dev *indio_dev)
+{
+       struct device *dev = &indio_dev->dev;
+       struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+       iproc_adc_dbg_reg(dev, adc_priv, IPROC_REGCTL1);
+       iproc_adc_dbg_reg(dev, adc_priv, IPROC_REGCTL2);
+       iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_THRES);
+       iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_MASK);
+       iproc_adc_dbg_reg(dev, adc_priv, IPROC_INTERRUPT_STATUS);
+       iproc_adc_dbg_reg(dev, adc_priv, IPROC_CONTROLLER_STATUS);
+       iproc_adc_dbg_reg(dev, adc_priv, IPROC_ANALOG_CONTROL);
+       iproc_adc_dbg_reg(dev, adc_priv, IPROC_AUX_DATA);
+       iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_CONTROL);
+       iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
+}
+
+static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
+{
+       u32 channel_intr_status;
+       u32 intr_status;
+       u32 intr_mask;
+       struct iio_dev *indio_dev = data;
+       struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+       /*
+        * This interrupt is shared with the touchscreen driver.
+        * Make sure this interrupt is intended for us.
+        * Handle only ADC channel specific interrupts.
+        */
+       regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
+       regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &intr_mask);
+       intr_status = intr_status & intr_mask;
+       channel_intr_status = (intr_status & IPROC_ADC_INTR_MASK) >>
+                               IPROC_ADC_INTR;
+       if (channel_intr_status)
+               return IRQ_WAKE_THREAD;
+
+       return IRQ_NONE;
+}
+
+static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
+{
+       irqreturn_t retval = IRQ_NONE;
+       struct iproc_adc_priv *adc_priv;
+       struct iio_dev *indio_dev = data;
+       unsigned int valid_entries;
+       u32 intr_status;
+       u32 intr_channels;
+       u32 channel_status;
+       u32 ch_intr_status;
+
+       adc_priv = iio_priv(indio_dev);
+
+       regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
+       dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n",
+                       intr_status);
+
+       intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
+       if (intr_channels) {
+               regmap_read(adc_priv->regmap,
+                           IPROC_ADC_CHANNEL_INTERRUPT_STATUS +
+                           IPROC_ADC_CHANNEL_OFFSET * adc_priv->chan_id,
+                           &ch_intr_status);
+
+               if (ch_intr_status & IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK) {
+                       regmap_read(adc_priv->regmap,
+                                       IPROC_ADC_CHANNEL_STATUS +
+                                       IPROC_ADC_CHANNEL_OFFSET *
+                                       adc_priv->chan_id,
+                                       &channel_status);
+
+                       valid_entries = ((channel_status &
+                               IPROC_ADC_CHANNEL_VALID_ENTERIES_MASK) >>
+                               IPROC_ADC_CHANNEL_VALID_ENTERIES);
+                       if (valid_entries >= 1) {
+                               regmap_read(adc_priv->regmap,
+                                       IPROC_ADC_CHANNEL_DATA +
+                                       IPROC_ADC_CHANNEL_OFFSET *
+                                       adc_priv->chan_id,
+                                       &adc_priv->chan_val);
+                               complete(&adc_priv->completion);
+                       } else {
+                               dev_err(&indio_dev->dev,
+                                       "No data rcvd on channel %d\n",
+                                       adc_priv->chan_id);
+                       }
+                       regmap_write(adc_priv->regmap,
+                                       IPROC_ADC_CHANNEL_INTERRUPT_MASK +
+                                       IPROC_ADC_CHANNEL_OFFSET *
+                                       adc_priv->chan_id,
+                                       (ch_intr_status &
+                                       ~(IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK)));
+               }
+               regmap_write(adc_priv->regmap,
+                               IPROC_ADC_CHANNEL_INTERRUPT_STATUS +
+                               IPROC_ADC_CHANNEL_OFFSET * adc_priv->chan_id,
+                               ch_intr_status);
+               regmap_write(adc_priv->regmap, IPROC_INTERRUPT_STATUS,
+                               intr_channels);
+               retval = IRQ_HANDLED;
+       }
+
+       return retval;
+}
+
+static int iproc_adc_do_read(struct iio_dev *indio_dev,
+                          int channel,
+                          u16 *p_adc_data)
+{
+       int read_len = 0;
+       u32 val;
+       u32 mask;
+       u32 val_check;
+       int failed_cnt = 0;
+       struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+       mutex_lock(&adc_priv->mutex);
+
+       /*
+        * After a read is complete the ADC interrupts will be disabled so
+        * we can assume this section of code is safe from interrupts.
+        */
+       adc_priv->chan_val = -1;
+       adc_priv->chan_id = channel;
+
+       reinit_completion(&adc_priv->completion);
+       /* Clear any pending interrupt */
+       regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_STATUS,
+                       IPROC_ADC_INTR_MASK | IPROC_ADC_AUXDATA_RDY_INTR,
+                       ((0x0 << channel) << IPROC_ADC_INTR) |
+                       IPROC_ADC_AUXDATA_RDY_INTR);
+
+       /* Configure channel for snapshot mode and enable  */
+       val = (BIT(IPROC_ADC_CHANNEL_ROUNDS) |
+               (IPROC_ADC_CHANNEL_MODE_SNAPSHOT << IPROC_ADC_CHANNEL_MODE) |
+               (0x1 << IPROC_ADC_CHANNEL_ENABLE));
+
+       mask = IPROC_ADC_CHANNEL_ROUNDS_MASK | IPROC_ADC_CHANNEL_MODE_MASK |
+               IPROC_ADC_CHANNEL_ENABLE_MASK;
+       regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_REGCTL1 +
+                               IPROC_ADC_CHANNEL_OFFSET * channel),
+                               mask, val);
+
+       /* Set the Watermark for a channel */
+       regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_REGCTL2 +
+                                       IPROC_ADC_CHANNEL_OFFSET * channel),
+                                       IPROC_ADC_CHANNEL_WATERMARK_MASK,
+                                       0x1);
+
+       /* Enable water mark interrupt */
+       regmap_update_bits(adc_priv->regmap, (IPROC_ADC_CHANNEL_INTERRUPT_MASK +
+                                       IPROC_ADC_CHANNEL_OFFSET *
+                                       channel),
+                                       IPROC_ADC_CHANNEL_WTRMRK_INTR_MASK,
+                                       IPROC_ADC_WATER_MARK_INTR_ENABLE);
+       regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val);
+
+       /* Enable ADC interrupt for a channel */
+       val |= (BIT(channel) << IPROC_ADC_INTR);
+       regmap_write(adc_priv->regmap, IPROC_INTERRUPT_MASK, val);
+
+       /*
+        * There seems to be a very rare issue where writing to this register
+        * does not take effect.  To work around the issue we will try multiple
+        * writes.  In total we will spend about 10*10 = 100 us attempting this.
+        * Testing has shown that this may loop a few time, but we have never
+        * hit the full count.
+        */
+       regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
+       while (val_check != val) {
+               failed_cnt++;
+
+               if (failed_cnt > IPROC_ADC_INTMASK_RETRY_ATTEMPTS)
+                       break;
+
+               udelay(10);
+               regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_MASK,
+                               IPROC_ADC_INTR_MASK,
+                               ((0x1 << channel) <<
+                               IPROC_ADC_INTR));
+
+               regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
+       }
+
+       if (failed_cnt) {
+               dev_dbg(&indio_dev->dev,
+                       "IntMask failed (%d times)", failed_cnt);
+               if (failed_cnt > IPROC_ADC_INTMASK_RETRY_ATTEMPTS) {
+                       dev_err(&indio_dev->dev,
+                               "IntMask set failed. Read will likely fail.");
+                       read_len = -EIO;
+                       goto adc_err;
+               }
+       }
+       regmap_read(adc_priv->regmap, IPROC_INTERRUPT_MASK, &val_check);
+
+       if (wait_for_completion_timeout(&adc_priv->completion,
+               IPROC_ADC_READ_TIMEOUT) > 0) {
+
+               /* Only the lower 16 bits are relevant */
+               *p_adc_data = adc_priv->chan_val & 0xFFFF;
+               read_len = sizeof(*p_adc_data);
+
+       } else {
+               /*
+                * We never got the interrupt, something went wrong.
+                * Perhaps the interrupt may still be coming, we do not want
+                * that now.  Lets disable the ADC interrupt, and clear the
+                * status to put it back in to normal state.
+                */
+               read_len = -ETIMEDOUT;
+               goto adc_err;
+       }
+       mutex_unlock(&adc_priv->mutex);
+
+       return read_len;
+
+adc_err:
+       regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_MASK,
+                          IPROC_ADC_INTR_MASK,
+                          ((0x0 << channel) << IPROC_ADC_INTR));
+
+       regmap_update_bits(adc_priv->regmap, IPROC_INTERRUPT_STATUS,
+                          IPROC_ADC_INTR_MASK,
+                          ((0x0 << channel) << IPROC_ADC_INTR));
+
+       dev_err(&indio_dev->dev, "Timed out waiting for ADC data!\n");
+       iproc_adc_reg_dump(indio_dev);
+       mutex_unlock(&adc_priv->mutex);
+
+       return read_len;
+}
+
+static int iproc_adc_enable(struct iio_dev *indio_dev)
+{
+       u32 val;
+       u32 channel_id;
+       struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+       int ret;
+
+       /* Set i_amux = 3b'000, select channel 0 */
+       ret = regmap_update_bits(adc_priv->regmap, IPROC_ANALOG_CONTROL,
+                               IPROC_ADC_CHANNEL_SEL_MASK, 0);
+       if (ret) {
+               dev_err(&indio_dev->dev,
+                       "failed to write IPROC_ANALOG_CONTROL %d\n", ret);
+               return ret;
+       }
+       adc_priv->chan_val = -1;
+
+       /*
+        * PWR up LDO, ADC, and Band Gap (0 to enable)
+        * Also enable ADC controller (set high)
+        */
+       ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val);
+       if (ret) {
+               dev_err(&indio_dev->dev,
+                       "failed to read IPROC_REGCTL2 %d\n", ret);
+               return ret;
+       }
+
+       val &= ~(IPROC_ADC_PWR_LDO | IPROC_ADC_PWR_ADC | IPROC_ADC_PWR_BG);
+
+       ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val);
+       if (ret) {
+               dev_err(&indio_dev->dev,
+                       "failed to write IPROC_REGCTL2 %d\n", ret);
+               return ret;
+       }
+
+       ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val);
+       if (ret) {
+               dev_err(&indio_dev->dev,
+                       "failed to read IPROC_REGCTL2 %d\n", ret);
+               return ret;
+       }
+
+       val |= IPROC_ADC_CONTROLLER_EN;
+       ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val);
+       if (ret) {
+               dev_err(&indio_dev->dev,
+                       "failed to write IPROC_REGCTL2 %d\n", ret);
+               return ret;
+       }
+
+       for (channel_id = 0; channel_id < indio_dev->num_channels;
+               channel_id++) {
+               ret = regmap_write(adc_priv->regmap,
+                               IPROC_ADC_CHANNEL_INTERRUPT_MASK +
+                               IPROC_ADC_CHANNEL_OFFSET * channel_id, 0);
+               if (ret) {
+                       dev_err(&indio_dev->dev,
+                           "failed to write ADC_CHANNEL_INTERRUPT_MASK %d\n",
+                           ret);
+                       return ret;
+               }
+
+               ret = regmap_write(adc_priv->regmap,
+                               IPROC_ADC_CHANNEL_INTERRUPT_STATUS +
+                               IPROC_ADC_CHANNEL_OFFSET * channel_id, 0);
+               if (ret) {
+                       dev_err(&indio_dev->dev,
+                           "failed to write ADC_CHANNEL_INTERRUPT_STATUS %d\n",
+                           ret);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static void iproc_adc_disable(struct iio_dev *indio_dev)
+{
+       u32 val;
+       int ret;
+       struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+       ret = regmap_read(adc_priv->regmap, IPROC_REGCTL2, &val);
+       if (ret) {
+               dev_err(&indio_dev->dev,
+                       "failed to read IPROC_REGCTL2 %d\n", ret);
+               return;
+       }
+
+       val &= ~IPROC_ADC_CONTROLLER_EN;
+       ret = regmap_write(adc_priv->regmap, IPROC_REGCTL2, val);
+       if (ret) {
+               dev_err(&indio_dev->dev,
+                       "failed to write IPROC_REGCTL2 %d\n", ret);
+               return;
+       }
+}
+
+static int iproc_adc_read_raw(struct iio_dev *indio_dev,
+                         struct iio_chan_spec const *chan,
+                         int *val,
+                         int *val2,
+                         long mask)
+{
+       u16 adc_data;
+       int err;
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               err =  iproc_adc_do_read(indio_dev, chan->channel, &adc_data);
+               if (err < 0)
+                       return err;
+               *val = adc_data;
+               return IIO_VAL_INT;
+       case IIO_CHAN_INFO_SCALE:
+               switch (chan->type) {
+               case IIO_VOLTAGE:
+                       *val = 1800;
+                       *val2 = 10;
+                       return IIO_VAL_FRACTIONAL_LOG2;
+               default:
+                       return -EINVAL;
+               }
+       default:
+               return -EINVAL;
+       }
+}
+
+static const struct iio_info iproc_adc_iio_info = {
+       .read_raw = &iproc_adc_read_raw,
+       .driver_module = THIS_MODULE,
+};
+
+#define IPROC_ADC_CHANNEL(_index, _id) {                \
+       .type = IIO_VOLTAGE,                            \
+       .indexed = 1,                                   \
+       .channel = _index,                              \
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),   \
+       .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+       .datasheet_name = _id,                          \
+}
+
+static const struct iio_chan_spec iproc_adc_iio_channels[] = {
+       IPROC_ADC_CHANNEL(0, "adc0"),
+       IPROC_ADC_CHANNEL(1, "adc1"),
+       IPROC_ADC_CHANNEL(2, "adc2"),
+       IPROC_ADC_CHANNEL(3, "adc3"),
+       IPROC_ADC_CHANNEL(4, "adc4"),
+       IPROC_ADC_CHANNEL(5, "adc5"),
+       IPROC_ADC_CHANNEL(6, "adc6"),
+       IPROC_ADC_CHANNEL(7, "adc7"),
+};
+
+static int iproc_adc_probe(struct platform_device *pdev)
+{
+       struct iproc_adc_priv *adc_priv;
+       struct iio_dev *indio_dev = NULL;
+       int ret;
+
+       indio_dev = devm_iio_device_alloc(&pdev->dev,
+                                       sizeof(*adc_priv));
+       if (!indio_dev) {
+               dev_err(&pdev->dev, "failed to allocate iio device\n");
+               return -ENOMEM;
+       }
+
+       adc_priv = iio_priv(indio_dev);
+       platform_set_drvdata(pdev, indio_dev);
+
+       mutex_init(&adc_priv->mutex);
+
+       init_completion(&adc_priv->completion);
+
+       adc_priv->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                          "adc-syscon");
+       if (IS_ERR(adc_priv->regmap)) {
+               dev_err(&pdev->dev, "failed to get handle for tsc syscon\n");
+               ret = PTR_ERR(adc_priv->regmap);
+               return ret;
+       }
+
+       adc_priv->adc_clk = devm_clk_get(&pdev->dev, "tsc_clk");
+       if (IS_ERR(adc_priv->adc_clk)) {
+               dev_err(&pdev->dev,
+                       "failed getting clock tsc_clk\n");
+               ret = PTR_ERR(adc_priv->adc_clk);
+               return ret;
+       }
+
+       adc_priv->irqno = platform_get_irq(pdev, 0);
+       if (adc_priv->irqno <= 0) {
+               dev_err(&pdev->dev, "platform_get_irq failed\n");
+               ret = -ENODEV;
+               return ret;
+       }
+
+       ret = regmap_update_bits(adc_priv->regmap, IPROC_REGCTL2,
+                               IPROC_ADC_AUXIN_SCAN_ENA, 0);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to write IPROC_REGCTL2 %d\n", ret);
+               return ret;
+       }
+
+       ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
+                               iproc_adc_interrupt_thread,
+                               iproc_adc_interrupt_handler,
+                               IRQF_SHARED, "iproc-adc", indio_dev);
+       if (ret) {
+               dev_err(&pdev->dev, "request_irq error %d\n", ret);
+               return ret;
+       }
+
+       ret = clk_prepare_enable(adc_priv->adc_clk);
+       if (ret) {
+               dev_err(&pdev->dev,
+                       "clk_prepare_enable failed %d\n", ret);
+               return ret;
+       }
+
+       ret = iproc_adc_enable(indio_dev);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to enable adc %d\n", ret);
+               goto err_adc_enable;
+       }
+
+       indio_dev->name = "iproc-static-adc";
+       indio_dev->dev.parent = &pdev->dev;
+       indio_dev->dev.of_node = pdev->dev.of_node;
+       indio_dev->info = &iproc_adc_iio_info;
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->channels = iproc_adc_iio_channels;
+       indio_dev->num_channels = ARRAY_SIZE(iproc_adc_iio_channels);
+
+       ret = iio_device_register(indio_dev);
+       if (ret) {
+               dev_err(&pdev->dev, "iio_device_register failed:err %d\n", ret);
+               goto err_clk;
+       }
+
+       return 0;
+
+err_clk:
+       iproc_adc_disable(indio_dev);
+err_adc_enable:
+       clk_disable_unprepare(adc_priv->adc_clk);
+
+       return ret;
+}
+
+static int iproc_adc_remove(struct platform_device *pdev)
+{
+       struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+       struct iproc_adc_priv *adc_priv = iio_priv(indio_dev);
+
+       iio_device_unregister(indio_dev);
+       iproc_adc_disable(indio_dev);
+       clk_disable_unprepare(adc_priv->adc_clk);
+
+       return 0;
+}
+
+static const struct of_device_id iproc_adc_of_match[] = {
+       {.compatible = "brcm,iproc-static-adc", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, iproc_adc_of_match);
+
+static struct platform_driver iproc_adc_driver = {
+       .probe  = iproc_adc_probe,
+       .remove = iproc_adc_remove,
+       .driver = {
+               .name   = "iproc-static-adc",
+               .of_match_table = of_match_ptr(iproc_adc_of_match),
+       },
+};
+module_platform_driver(iproc_adc_driver);
+
+MODULE_DESCRIPTION("Broadcom iProc ADC controller driver");
+MODULE_AUTHOR("Raveendra Padasalagi <raveendra.padasalagi@broadcom.com>");
+MODULE_LICENSE("GPL v2");
index 8254f529b2a9a00738eb017f9e5d0f77c4aacfd8..91636c0ba5b534da240084f48670dfefd01f6764 100644 (file)
@@ -186,7 +186,7 @@ done:
 
        if (!sample_invalid)
                iio_push_to_buffers_with_timestamp(indio_dev, data,
-                                                  iio_get_time_ns());
+                                                  iio_get_time_ns(indio_dev));
        iio_trigger_notify_done(indio_dev->trig);
 
        return IRQ_HANDLED;
index c73c6c62a6aca39e8b6ccebc7128719320f4f4b5..678e8c7ea7633afb0dacbdca89863fb009a8847c 100644 (file)
@@ -400,7 +400,7 @@ static void hi8435_iio_push_event(struct iio_dev *idev, unsigned int val)
                        iio_push_event(idev,
                                       IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
                                                    IIO_EV_TYPE_THRESH, dir),
-                                      iio_get_time_ns());
+                                      iio_get_time_ns(idev));
                }
        }
 
@@ -455,6 +455,7 @@ static int hi8435_probe(struct spi_device *spi)
        mutex_init(&priv->lock);
 
        idev->dev.parent        = &spi->dev;
+       idev->dev.of_node       = spi->dev.of_node;
        idev->name              = spi_get_device_id(spi)->name;
        idev->modes             = INDIO_DIRECT_MODE;
        idev->info              = &hi8435_info;
index 502f2fbe8aefca92e27c2e5449a46990c0d1da14..955f3fdaf51942358fd4f5716bf0e6303afa3da7 100644 (file)
@@ -465,7 +465,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
        s64 time_a, time_b;
        unsigned int alert;
 
-       time_a = iio_get_time_ns();
+       time_a = iio_get_time_ns(indio_dev);
 
        /*
         * Because the timer thread and the chip conversion clock
@@ -504,7 +504,7 @@ static int ina2xx_work_buffer(struct iio_dev *indio_dev)
                data[i++] = val;
        }
 
-       time_b = iio_get_time_ns();
+       time_b = iio_get_time_ns(indio_dev);
 
        iio_push_to_buffers_with_timestamp(indio_dev,
                                           (unsigned int *)data, time_a);
@@ -554,7 +554,7 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
        dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
                chip->allow_async_readout);
 
-       chip->prev_ns = iio_get_time_ns();
+       chip->prev_ns = iio_get_time_ns(indio_dev);
 
        chip->task = kthread_run(ina2xx_capture_thread, (void *)indio_dev,
                                 "%s:%d-%uus", indio_dev->name, indio_dev->id,
@@ -691,6 +691,7 @@ static int ina2xx_probe(struct i2c_client *client,
 
        indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
        indio_dev->dev.parent = &client->dev;
+       indio_dev->dev.of_node = client->dev.of_node;
        indio_dev->channels = ina2xx_channels;
        indio_dev->num_channels = ARRAY_SIZE(ina2xx_channels);
        indio_dev->name = id->name;
index 41d495c6035e1a882e03b17ca8066224e767cde6..712fbd2b1f16259d9027a74404c0278858f58368 100644 (file)
@@ -426,6 +426,7 @@ static int max1027_probe(struct spi_device *spi)
 
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->info = &max1027_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = st->info->channels;
index 998dc3caad4c15cabe58f7922a594e6976de23ec..841a13c9b6ea03cdae08f4e3f46d85fd89253124 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -788,7 +790,7 @@ static irqreturn_t max1363_event_handler(int irq, void *private)
 {
        struct iio_dev *indio_dev = private;
        struct max1363_state *st = iio_priv(indio_dev);
-       s64 timestamp = iio_get_time_ns();
+       s64 timestamp = iio_get_time_ns(indio_dev);
        unsigned long mask, loc;
        u8 rx;
        u8 tx[2] = { st->setupbyte,
@@ -1506,7 +1508,8 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
        if (b_sent < 0)
                goto done_free;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, rxbuf, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, rxbuf,
+                                          iio_get_time_ns(indio_dev));
 
 done_free:
        kfree(rxbuf);
@@ -1516,6 +1519,56 @@ done:
        return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_OF
+
+#define MAX1363_COMPATIBLE(of_compatible, cfg) {               \
+                       .compatible = of_compatible,            \
+                       .data = &max1363_chip_info_tbl[cfg],    \
+}
+
+static const struct of_device_id max1363_of_match[] = {
+       MAX1363_COMPATIBLE("maxim,max1361", max1361),
+       MAX1363_COMPATIBLE("maxim,max1362", max1362),
+       MAX1363_COMPATIBLE("maxim,max1363", max1363),
+       MAX1363_COMPATIBLE("maxim,max1364", max1364),
+       MAX1363_COMPATIBLE("maxim,max1036", max1036),
+       MAX1363_COMPATIBLE("maxim,max1037", max1037),
+       MAX1363_COMPATIBLE("maxim,max1038", max1038),
+       MAX1363_COMPATIBLE("maxim,max1039", max1039),
+       MAX1363_COMPATIBLE("maxim,max1136", max1136),
+       MAX1363_COMPATIBLE("maxim,max1137", max1137),
+       MAX1363_COMPATIBLE("maxim,max1138", max1138),
+       MAX1363_COMPATIBLE("maxim,max1139", max1139),
+       MAX1363_COMPATIBLE("maxim,max1236", max1236),
+       MAX1363_COMPATIBLE("maxim,max1237", max1237),
+       MAX1363_COMPATIBLE("maxim,max1238", max1238),
+       MAX1363_COMPATIBLE("maxim,max1239", max1239),
+       MAX1363_COMPATIBLE("maxim,max11600", max11600),
+       MAX1363_COMPATIBLE("maxim,max11601", max11601),
+       MAX1363_COMPATIBLE("maxim,max11602", max11602),
+       MAX1363_COMPATIBLE("maxim,max11603", max11603),
+       MAX1363_COMPATIBLE("maxim,max11604", max11604),
+       MAX1363_COMPATIBLE("maxim,max11605", max11605),
+       MAX1363_COMPATIBLE("maxim,max11606", max11606),
+       MAX1363_COMPATIBLE("maxim,max11607", max11607),
+       MAX1363_COMPATIBLE("maxim,max11608", max11608),
+       MAX1363_COMPATIBLE("maxim,max11609", max11609),
+       MAX1363_COMPATIBLE("maxim,max11610", max11610),
+       MAX1363_COMPATIBLE("maxim,max11611", max11611),
+       MAX1363_COMPATIBLE("maxim,max11612", max11612),
+       MAX1363_COMPATIBLE("maxim,max11613", max11613),
+       MAX1363_COMPATIBLE("maxim,max11614", max11614),
+       MAX1363_COMPATIBLE("maxim,max11615", max11615),
+       MAX1363_COMPATIBLE("maxim,max11616", max11616),
+       MAX1363_COMPATIBLE("maxim,max11617", max11617),
+       MAX1363_COMPATIBLE("maxim,max11644", max11644),
+       MAX1363_COMPATIBLE("maxim,max11645", max11645),
+       MAX1363_COMPATIBLE("maxim,max11646", max11646),
+       MAX1363_COMPATIBLE("maxim,max11647", max11647),
+       { /* sentinel */ }
+};
+#endif
+
 static int max1363_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
@@ -1523,6 +1576,7 @@ static int max1363_probe(struct i2c_client *client,
        struct max1363_state *st;
        struct iio_dev *indio_dev;
        struct regulator *vref;
+       const struct of_device_id *match;
 
        indio_dev = devm_iio_device_alloc(&client->dev,
                                          sizeof(struct max1363_state));
@@ -1549,7 +1603,12 @@ static int max1363_probe(struct i2c_client *client,
        /* this is only used for device removal purposes */
        i2c_set_clientdata(client, indio_dev);
 
-       st->chip_info = &max1363_chip_info_tbl[id->driver_data];
+       match = of_match_device(of_match_ptr(max1363_of_match),
+                               &client->dev);
+       if (match)
+               st->chip_info = of_device_get_match_data(&client->dev);
+       else
+               st->chip_info = &max1363_chip_info_tbl[id->driver_data];
        st->client = client;
 
        st->vref_uv = st->chip_info->int_vref_mv * 1000;
@@ -1587,6 +1646,7 @@ static int max1363_probe(struct i2c_client *client,
 
        /* Establish that the iio_dev is a child of the i2c device */
        indio_dev->dev.parent = &client->dev;
+       indio_dev->dev.of_node = client->dev.of_node;
        indio_dev->name = id->name;
        indio_dev->channels = st->chip_info->channels;
        indio_dev->num_channels = st->chip_info->num_channels;
@@ -1692,6 +1752,7 @@ MODULE_DEVICE_TABLE(i2c, max1363_id);
 static struct i2c_driver max1363_driver = {
        .driver = {
                .name = "max1363",
+               .of_match_table = of_match_ptr(max1363_of_match),
        },
        .probe = max1363_probe,
        .remove = max1363_remove,
index a850ca7d1edacd38ef572234ff06b4613d2a74bd..634717ae12f354a79c77cea7fbd84633c1bb9f79 100644 (file)
@@ -308,6 +308,7 @@ static int mcp320x_probe(struct spi_device *spi)
        adc->spi = spi;
 
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &mcp320x_info;
index d1172dc1e8e2b9cfec641e7df3fc983d399a62b2..254135e07792295c886adcee861ccca960b317da 100644 (file)
@@ -352,6 +352,7 @@ static int mcp3422_probe(struct i2c_client *client,
        mutex_init(&adc->lock);
 
        indio_dev->dev.parent = &client->dev;
+       indio_dev->dev.of_node = client->dev.of_node;
        indio_dev->name = dev_name(&client->dev);
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &mcp3422_info;
index ad26da1edbee95ce547b6f488daf792720199b37..b84d37c80a944dd4080c1207b23e20e6489e010f 100644 (file)
@@ -373,13 +373,6 @@ static u32 mxs_lradc_plate_mask(struct mxs_lradc *lradc)
        return LRADC_CTRL0_MX28_PLATE_MASK;
 }
 
-static u32 mxs_lradc_irq_en_mask(struct mxs_lradc *lradc)
-{
-       if (lradc->soc == IMX23_LRADC)
-               return LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK;
-       return LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK;
-}
-
 static u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc)
 {
        if (lradc->soc == IMX23_LRADC)
@@ -1120,18 +1113,16 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
 {
        struct input_dev *input;
        struct device *dev = lradc->dev;
-       int ret;
 
        if (!lradc->use_touchscreen)
                return 0;
 
-       input = input_allocate_device();
+       input = devm_input_allocate_device(dev);
        if (!input)
                return -ENOMEM;
 
        input->name = DRIVER_NAME;
        input->id.bustype = BUS_HOST;
-       input->dev.parent = dev;
        input->open = mxs_lradc_ts_open;
        input->close = mxs_lradc_ts_close;
 
@@ -1146,20 +1137,8 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
 
        lradc->ts_input = input;
        input_set_drvdata(input, lradc);
-       ret = input_register_device(input);
-       if (ret)
-               input_free_device(lradc->ts_input);
-
-       return ret;
-}
-
-static void mxs_lradc_ts_unregister(struct mxs_lradc *lradc)
-{
-       if (!lradc->use_touchscreen)
-               return;
 
-       mxs_lradc_disable_ts(lradc);
-       input_unregister_device(lradc->ts_input);
+       return input_register_device(input);
 }
 
 /*
@@ -1510,7 +1489,9 @@ static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
 {
        int i;
 
-       mxs_lradc_reg_clear(lradc, mxs_lradc_irq_en_mask(lradc), LRADC_CTRL1);
+       mxs_lradc_reg_clear(lradc,
+               lradc->buffer_vchans << LRADC_CTRL1_LRADC_IRQ_EN_OFFSET,
+               LRADC_CTRL1);
 
        for (i = 0; i < LRADC_MAX_DELAY_CHANS; i++)
                mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(i));
@@ -1721,13 +1702,11 @@ static int mxs_lradc_probe(struct platform_device *pdev)
        ret = iio_device_register(iio);
        if (ret) {
                dev_err(dev, "Failed to register IIO device\n");
-               goto err_ts;
+               return ret;
        }
 
        return 0;
 
-err_ts:
-       mxs_lradc_ts_unregister(lradc);
 err_ts_register:
        mxs_lradc_hw_stop(lradc);
 err_dev:
@@ -1745,7 +1724,6 @@ static int mxs_lradc_remove(struct platform_device *pdev)
        struct mxs_lradc *lradc = iio_priv(iio);
 
        iio_device_unregister(iio);
-       mxs_lradc_ts_unregister(lradc);
        mxs_lradc_hw_stop(lradc);
        mxs_lradc_trigger_remove(iio);
        iio_triggered_buffer_cleanup(iio);
index e525aa6475c42b20cef6f3653da973620a2bccc9..db9b829ccf0daef20b6d9201deeba23e31b4222f 100644 (file)
@@ -79,10 +79,29 @@ static const struct iio_chan_spec nau7802_chan_array[] = {
 static const u16 nau7802_sample_freq_avail[] = {10, 20, 40, 80,
                                                10, 10, 10, 320};
 
+static ssize_t nau7802_show_scales(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct nau7802_state *st = iio_priv(dev_to_iio_dev(dev));
+       int i, len = 0;
+
+       for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+               len += scnprintf(buf + len, PAGE_SIZE - len, "0.%09d ",
+                                st->scale_avail[i]);
+
+       buf[len-1] = '\n';
+
+       return len;
+}
+
 static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 40 80 320");
 
+static IIO_DEVICE_ATTR(in_voltage_scale_available, S_IRUGO, nau7802_show_scales,
+                      NULL, 0);
+
 static struct attribute *nau7802_attributes[] = {
        &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+       &iio_dev_attr_in_voltage_scale_available.dev_attr.attr,
        NULL
 };
 
@@ -414,6 +433,7 @@ static int nau7802_probe(struct i2c_client *client,
        i2c_set_clientdata(client, indio_dev);
 
        indio_dev->dev.parent = &client->dev;
+       indio_dev->dev.of_node = client->dev.of_node;
        indio_dev->name = dev_name(&client->dev);
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &nau7802_info;
index 9fd032d9f402fbd57e58d77f186ca66410148d78..319172cf7da80f70e0c18fdea6bf33d68d1133ee 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/acpi.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/buffer.h>
@@ -138,7 +139,8 @@ static irqreturn_t adc081c_trigger_handler(int irq, void *p)
        if (ret < 0)
                goto out;
        buf[0] = ret;
-       iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, buf,
+                                          iio_get_time_ns(indio_dev));
 out:
        iio_trigger_notify_done(indio_dev->trig);
        return IRQ_HANDLED;
@@ -149,12 +151,24 @@ static int adc081c_probe(struct i2c_client *client,
 {
        struct iio_dev *iio;
        struct adc081c *adc;
-       struct adcxx1c_model *model = &adcxx1c_models[id->driver_data];
+       struct adcxx1c_model *model;
        int err;
 
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
                return -EOPNOTSUPP;
 
+       if (ACPI_COMPANION(&client->dev)) {
+               const struct acpi_device_id *ad_id;
+
+               ad_id = acpi_match_device(client->dev.driver->acpi_match_table,
+                                         &client->dev);
+               if (!ad_id)
+                       return -ENODEV;
+               model = &adcxx1c_models[ad_id->driver_data];
+       } else {
+               model = &adcxx1c_models[id->driver_data];
+       }
+
        iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
        if (!iio)
                return -ENOMEM;
@@ -172,6 +186,7 @@ static int adc081c_probe(struct i2c_client *client,
                return err;
 
        iio->dev.parent = &client->dev;
+       iio->dev.of_node = client->dev.of_node;
        iio->name = dev_name(&client->dev);
        iio->modes = INDIO_DIRECT_MODE;
        iio->info = &adc081c_info;
@@ -231,10 +246,21 @@ static const struct of_device_id adc081c_of_match[] = {
 MODULE_DEVICE_TABLE(of, adc081c_of_match);
 #endif
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id adc081c_acpi_match[] = {
+       { "ADC081C", ADC081C },
+       { "ADC101C", ADC101C },
+       { "ADC121C", ADC121C },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match);
+#endif
+
 static struct i2c_driver adc081c_driver = {
        .driver = {
                .name = "adc081c",
                .of_match_table = of_match_ptr(adc081c_of_match),
+               .acpi_match_table = ACPI_PTR(adc081c_acpi_match),
        },
        .probe = adc081c_probe,
        .remove = adc081c_remove,
index 0afeac0c9bade4795179438fc6b8593b72b5541e..f4ba23effe9ad9c71fd9d947c3259cc4bbbd2537 100644 (file)
@@ -194,6 +194,7 @@ static int adc0832_probe(struct spi_device *spi)
 
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->info = &adc0832_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
 
index bc58867d6e8d64449a8ff757d281d2e2affdc1ef..89dfbd31be5c88569655a13c6fc72d70d6635e12 100644 (file)
@@ -150,6 +150,7 @@ static int adc128_probe(struct spi_device *spi)
        spi_set_drvdata(spi, indio_dev);
 
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &adc128_info;
index 73cbf0b54e54f6c13fe1d5892f2859915be18ef8..1ef398770a1f2c2dcbc760f911b5064ea4950954 100644 (file)
 #define ADS1015_DEFAULT_DATA_RATE      4
 #define ADS1015_DEFAULT_CHAN           0
 
+enum {
+       ADS1015,
+       ADS1115,
+};
+
 enum ads1015_channels {
        ADS1015_AIN0_AIN1 = 0,
        ADS1015_AIN0_AIN3,
@@ -71,6 +76,10 @@ static const unsigned int ads1015_data_rate[] = {
        128, 250, 490, 920, 1600, 2400, 3300, 3300
 };
 
+static const unsigned int ads1115_data_rate[] = {
+       8, 16, 32, 64, 128, 250, 475, 860
+};
+
 static const struct {
        int scale;
        int uscale;
@@ -101,6 +110,7 @@ static const struct {
                .shift = 4,                                     \
                .endianness = IIO_CPU,                          \
        },                                                      \
+       .datasheet_name = "AIN"#_chan,                          \
 }
 
 #define ADS1015_V_DIFF_CHAN(_chan, _chan2, _addr) {            \
@@ -121,6 +131,45 @@ static const struct {
                .shift = 4,                                     \
                .endianness = IIO_CPU,                          \
        },                                                      \
+       .datasheet_name = "AIN"#_chan"-AIN"#_chan2,             \
+}
+
+#define ADS1115_V_CHAN(_chan, _addr) {                         \
+       .type = IIO_VOLTAGE,                                    \
+       .indexed = 1,                                           \
+       .address = _addr,                                       \
+       .channel = _chan,                                       \
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |          \
+                               BIT(IIO_CHAN_INFO_SCALE) |      \
+                               BIT(IIO_CHAN_INFO_SAMP_FREQ),   \
+       .scan_index = _addr,                                    \
+       .scan_type = {                                          \
+               .sign = 's',                                    \
+               .realbits = 16,                                 \
+               .storagebits = 16,                              \
+               .endianness = IIO_CPU,                          \
+       },                                                      \
+       .datasheet_name = "AIN"#_chan,                          \
+}
+
+#define ADS1115_V_DIFF_CHAN(_chan, _chan2, _addr) {            \
+       .type = IIO_VOLTAGE,                                    \
+       .differential = 1,                                      \
+       .indexed = 1,                                           \
+       .address = _addr,                                       \
+       .channel = _chan,                                       \
+       .channel2 = _chan2,                                     \
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |          \
+                               BIT(IIO_CHAN_INFO_SCALE) |      \
+                               BIT(IIO_CHAN_INFO_SAMP_FREQ),   \
+       .scan_index = _addr,                                    \
+       .scan_type = {                                          \
+               .sign = 's',                                    \
+               .realbits = 16,                                 \
+               .storagebits = 16,                              \
+               .endianness = IIO_CPU,                          \
+       },                                                      \
+       .datasheet_name = "AIN"#_chan"-AIN"#_chan2,             \
 }
 
 struct ads1015_data {
@@ -131,6 +180,8 @@ struct ads1015_data {
         */
        struct mutex lock;
        struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+
+       unsigned int *data_rate;
 };
 
 static bool ads1015_is_writeable_reg(struct device *dev, unsigned int reg)
@@ -157,6 +208,18 @@ static const struct iio_chan_spec ads1015_channels[] = {
        IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
 };
 
+static const struct iio_chan_spec ads1115_channels[] = {
+       ADS1115_V_DIFF_CHAN(0, 1, ADS1015_AIN0_AIN1),
+       ADS1115_V_DIFF_CHAN(0, 3, ADS1015_AIN0_AIN3),
+       ADS1115_V_DIFF_CHAN(1, 3, ADS1015_AIN1_AIN3),
+       ADS1115_V_DIFF_CHAN(2, 3, ADS1015_AIN2_AIN3),
+       ADS1115_V_CHAN(0, ADS1015_AIN0),
+       ADS1115_V_CHAN(1, ADS1015_AIN1),
+       ADS1115_V_CHAN(2, ADS1015_AIN2),
+       ADS1115_V_CHAN(3, ADS1015_AIN3),
+       IIO_CHAN_SOFT_TIMESTAMP(ADS1015_TIMESTAMP),
+};
+
 static int ads1015_set_power_state(struct ads1015_data *data, bool on)
 {
        int ret;
@@ -196,7 +259,7 @@ int ads1015_get_adc_result(struct ads1015_data *data, int chan, int *val)
                return ret;
 
        if (change) {
-               conv_time = DIV_ROUND_UP(USEC_PER_SEC, ads1015_data_rate[dr]);
+               conv_time = DIV_ROUND_UP(USEC_PER_SEC, data->data_rate[dr]);
                usleep_range(conv_time, conv_time + 1);
        }
 
@@ -225,7 +288,8 @@ static irqreturn_t ads1015_trigger_handler(int irq, void *p)
        buf[0] = res;
        mutex_unlock(&data->lock);
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, buf,
+                                          iio_get_time_ns(indio_dev));
 
 err:
        iio_trigger_notify_done(indio_dev->trig);
@@ -263,7 +327,7 @@ static int ads1015_set_data_rate(struct ads1015_data *data, int chan, int rate)
        int i, ret, rindex = -1;
 
        for (i = 0; i < ARRAY_SIZE(ads1015_data_rate); i++)
-               if (ads1015_data_rate[i] == rate) {
+               if (data->data_rate[i] == rate) {
                        rindex = i;
                        break;
                }
@@ -291,7 +355,9 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
        mutex_lock(&indio_dev->mlock);
        mutex_lock(&data->lock);
        switch (mask) {
-       case IIO_CHAN_INFO_RAW:
+       case IIO_CHAN_INFO_RAW: {
+               int shift = chan->scan_type.shift;
+
                if (iio_buffer_enabled(indio_dev)) {
                        ret = -EBUSY;
                        break;
@@ -307,8 +373,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
                        break;
                }
 
-               /* 12 bit res, D0 is bit 4 in conversion register */
-               *val = sign_extend32(*val >> 4, 11);
+               *val = sign_extend32(*val >> shift, 15 - shift);
 
                ret = ads1015_set_power_state(data, false);
                if (ret < 0)
@@ -316,6 +381,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
 
                ret = IIO_VAL_INT;
                break;
+       }
        case IIO_CHAN_INFO_SCALE:
                idx = data->channel_data[chan->address].pga;
                *val = ads1015_scale[idx].scale;
@@ -324,7 +390,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
                idx = data->channel_data[chan->address].data_rate;
-               *val = ads1015_data_rate[idx];
+               *val = data->data_rate[idx];
                ret = IIO_VAL_INT;
                break;
        default:
@@ -380,12 +446,15 @@ static const struct iio_buffer_setup_ops ads1015_buffer_setup_ops = {
 };
 
 static IIO_CONST_ATTR(scale_available, "3 2 1 0.5 0.25 0.125");
-static IIO_CONST_ATTR(sampling_frequency_available,
-                     "128 250 490 920 1600 2400 3300");
+
+static IIO_CONST_ATTR_NAMED(ads1015_sampling_frequency_available,
+       sampling_frequency_available, "128 250 490 920 1600 2400 3300");
+static IIO_CONST_ATTR_NAMED(ads1115_sampling_frequency_available,
+       sampling_frequency_available, "8 16 32 64 128 250 475 860");
 
 static struct attribute *ads1015_attributes[] = {
        &iio_const_attr_scale_available.dev_attr.attr,
-       &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+       &iio_const_attr_ads1015_sampling_frequency_available.dev_attr.attr,
        NULL,
 };
 
@@ -393,11 +462,28 @@ static const struct attribute_group ads1015_attribute_group = {
        .attrs = ads1015_attributes,
 };
 
-static const struct iio_info ads1015_info = {
+static struct attribute *ads1115_attributes[] = {
+       &iio_const_attr_scale_available.dev_attr.attr,
+       &iio_const_attr_ads1115_sampling_frequency_available.dev_attr.attr,
+       NULL,
+};
+
+static const struct attribute_group ads1115_attribute_group = {
+       .attrs = ads1115_attributes,
+};
+
+static struct iio_info ads1015_info = {
+       .driver_module  = THIS_MODULE,
+       .read_raw       = ads1015_read_raw,
+       .write_raw      = ads1015_write_raw,
+       .attrs          = &ads1015_attribute_group,
+};
+
+static struct iio_info ads1115_info = {
        .driver_module  = THIS_MODULE,
        .read_raw       = ads1015_read_raw,
        .write_raw      = ads1015_write_raw,
-       .attrs          = &ads1015_attribute_group,
+       .attrs          = &ads1115_attribute_group,
 };
 
 #ifdef CONFIG_OF
@@ -500,12 +586,25 @@ static int ads1015_probe(struct i2c_client *client,
        mutex_init(&data->lock);
 
        indio_dev->dev.parent = &client->dev;
-       indio_dev->info = &ads1015_info;
+       indio_dev->dev.of_node = client->dev.of_node;
        indio_dev->name = ADS1015_DRV_NAME;
-       indio_dev->channels = ads1015_channels;
-       indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
        indio_dev->modes = INDIO_DIRECT_MODE;
 
+       switch (id->driver_data) {
+       case ADS1015:
+               indio_dev->channels = ads1015_channels;
+               indio_dev->num_channels = ARRAY_SIZE(ads1015_channels);
+               indio_dev->info = &ads1015_info;
+               data->data_rate = (unsigned int *) &ads1015_data_rate;
+               break;
+       case ADS1115:
+               indio_dev->channels = ads1115_channels;
+               indio_dev->num_channels = ARRAY_SIZE(ads1115_channels);
+               indio_dev->info = &ads1115_info;
+               data->data_rate = (unsigned int *) &ads1115_data_rate;
+               break;
+       }
+
        /* we need to keep this ABI the same as used by hwmon ADS1015 driver */
        ads1015_get_channels_config(client);
 
@@ -590,7 +689,8 @@ static const struct dev_pm_ops ads1015_pm_ops = {
 };
 
 static const struct i2c_device_id ads1015_id[] = {
-       {"ads1015", 0},
+       {"ads1015", ADS1015},
+       {"ads1115", ADS1115},
        {}
 };
 MODULE_DEVICE_TABLE(i2c, ads1015_id);
index 03e907028cb6c1646eb55ce879f187bc5551f586..c400439900afd611b9444c86b8f28031fef85c66 100644 (file)
@@ -421,6 +421,7 @@ static int ads8688_probe(struct spi_device *spi)
 
        indio_dev->name = spi_get_device_id(spi)->name;
        indio_dev->dev.parent = &spi->dev;
+       indio_dev->dev.of_node = spi->dev.of_node;
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->channels = st->chip_info->channels;
        indio_dev->num_channels = st->chip_info->num_channels;
index c1e05532d437f263a9aa3d7f7c96147b13bfe682..8a368756881b82431404f86766d70b4b8acdd236 100644 (file)
@@ -326,8 +326,7 @@ static int tiadc_channel_init(struct iio_dev *indio_dev, int channels)
        int i;
 
        indio_dev->num_channels = channels;
-       chan_array = kcalloc(channels,
-                       sizeof(struct iio_chan_spec), GFP_KERNEL);
+       chan_array = kcalloc(channels, sizeof(*chan_array), GFP_KERNEL);
        if (chan_array == NULL)
                return -ENOMEM;
 
@@ -467,8 +466,7 @@ static int tiadc_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       indio_dev = devm_iio_device_alloc(&pdev->dev,
-                                         sizeof(struct tiadc_device));
+       indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*indio_dev));
        if (indio_dev == NULL) {
                dev_err(&pdev->dev, "failed to allocate iio device\n");
                return -ENOMEM;
@@ -531,8 +529,7 @@ static int tiadc_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int tiadc_suspend(struct device *dev)
+static int __maybe_unused tiadc_suspend(struct device *dev)
 {
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -550,7 +547,7 @@ static int tiadc_suspend(struct device *dev)
        return 0;
 }
 
-static int tiadc_resume(struct device *dev)
+static int __maybe_unused tiadc_resume(struct device *dev)
 {
        struct iio_dev *indio_dev = dev_get_drvdata(dev);
        struct tiadc_device *adc_dev = iio_priv(indio_dev);
@@ -567,14 +564,7 @@ static int tiadc_resume(struct device *dev)
        return 0;
 }
 
-static const struct dev_pm_ops tiadc_pm_ops = {
-       .suspend = tiadc_suspend,
-       .resume = tiadc_resume,
-};
-#define TIADC_PM_OPS (&tiadc_pm_ops)
-#else
-#define TIADC_PM_OPS NULL
-#endif
+static SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume);
 
 static const struct of_device_id ti_adc_dt_ids[] = {
        { .compatible = "ti,am3359-adc", },
@@ -585,7 +575,7 @@ MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);
 static struct platform_driver tiadc_driver = {
        .driver = {
                .name   = "TI-am335x-adc",
-               .pm     = TIADC_PM_OPS,
+               .pm     = &tiadc_pm_ops,
                .of_match_table = ti_adc_dt_ids,
        },
        .probe  = tiadc_probe,
index 653bf1379d2e5ea02b62184e26fee530b34ae46c..228a003adeed222ee45e9d8823afaf04edb8bfe7 100644 (file)
@@ -594,7 +594,8 @@ static irqreturn_t vf610_adc_isr(int irq, void *dev_id)
                if (iio_buffer_enabled(indio_dev)) {
                        info->buffer[0] = info->value;
                        iio_push_to_buffers_with_timestamp(indio_dev,
-                                       info->buffer, iio_get_time_ns());
+                                       info->buffer,
+                                       iio_get_time_ns(indio_dev));
                        iio_trigger_notify_done(indio_dev->trig);
                } else
                        complete(&info->completion);
index edcf3aabd70d905e8a66910b0459b8a82af0df6d..6d5c2a6f4e6ea7382a461bef2d33476468e183fb 100644 (file)
@@ -46,7 +46,7 @@ static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event)
                iio_push_event(indio_dev,
                        IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
                                IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING),
-                       iio_get_time_ns());
+                       iio_get_time_ns(indio_dev));
        } else {
                /*
                 * For other channels we don't know whether it is a upper or
@@ -56,7 +56,7 @@ static void xadc_handle_event(struct iio_dev *indio_dev, unsigned int event)
                iio_push_event(indio_dev,
                        IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
                                IIO_EV_TYPE_THRESH, IIO_EV_DIR_EITHER),
-                       iio_get_time_ns());
+                       iio_get_time_ns(indio_dev));
        }
 }
 
index 212cbedc7abb645e73c5933ae34d1140b449ceb0..dd99d273bae9bf620f2543b39400d10e5ae66fd6 100644 (file)
@@ -305,7 +305,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
        queue->fileio.active_block = NULL;
 
        spin_lock_irq(&queue->list_lock);
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
                block = queue->fileio.blocks[i];
 
                /* If we can't re-use it free it */
@@ -323,7 +323,7 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
 
        INIT_LIST_HEAD(&queue->incoming);
 
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
                if (queue->fileio.blocks[i]) {
                        block = queue->fileio.blocks[i];
                        if (block->state == IIO_BLOCK_STATE_DEAD) {
index f73290f84c9004aa15cfb5a22bc3e038bc9fa901..4bcc025e8c8a5a41c5918bbd86cfb3d668fd7969 100644 (file)
@@ -5,15 +5,17 @@
 menu "Chemical Sensors"
 
 config ATLAS_PH_SENSOR
-       tristate "Atlas Scientific OEM pH-SM sensor"
+       tristate "Atlas Scientific OEM SM sensors"
        depends on I2C
        select REGMAP_I2C
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        select IRQ_WORK
        help
-        Say Y here to build I2C interface support for the Atlas
-        Scientific OEM pH-SM sensor.
+        Say Y here to build I2C interface support for the following
+        Atlas Scientific OEM SM sensors:
+           * pH SM sensor
+           * EC SM sensor
 
         To compile this driver as module, choose M here: the
         module will be called atlas-ph-sensor.
index 62b37cd8fb56d840c66b819df65a48ea56461c61..ae038a59d256c1c9f6ff89acb65819090e66a829 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/irq_work.h>
 #include <linux/gpio.h>
 #include <linux/i2c.h>
+#include <linux/of_device.h>
 #include <linux/regmap.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/buffer.h>
 
 #define ATLAS_REG_PWR_CONTROL          0x06
 
-#define ATLAS_REG_CALIB_STATUS         0x0d
-#define ATLAS_REG_CALIB_STATUS_MASK    0x07
-#define ATLAS_REG_CALIB_STATUS_LOW     BIT(0)
-#define ATLAS_REG_CALIB_STATUS_MID     BIT(1)
-#define ATLAS_REG_CALIB_STATUS_HIGH    BIT(2)
+#define ATLAS_REG_PH_CALIB_STATUS      0x0d
+#define ATLAS_REG_PH_CALIB_STATUS_MASK 0x07
+#define ATLAS_REG_PH_CALIB_STATUS_LOW  BIT(0)
+#define ATLAS_REG_PH_CALIB_STATUS_MID  BIT(1)
+#define ATLAS_REG_PH_CALIB_STATUS_HIGH BIT(2)
 
-#define ATLAS_REG_TEMP_DATA            0x0e
+#define ATLAS_REG_EC_CALIB_STATUS              0x0f
+#define ATLAS_REG_EC_CALIB_STATUS_MASK         0x0f
+#define ATLAS_REG_EC_CALIB_STATUS_DRY          BIT(0)
+#define ATLAS_REG_EC_CALIB_STATUS_SINGLE       BIT(1)
+#define ATLAS_REG_EC_CALIB_STATUS_LOW          BIT(2)
+#define ATLAS_REG_EC_CALIB_STATUS_HIGH         BIT(3)
+
+#define ATLAS_REG_PH_TEMP_DATA         0x0e
 #define ATLAS_REG_PH_DATA              0x16
 
+#define ATLAS_REG_EC_PROBE             0x08
+#define ATLAS_REG_EC_TEMP_DATA         0x10
+#define ATLAS_REG_EC_DATA              0x18
+#define ATLAS_REG_TDS_DATA             0x1c
+#define ATLAS_REG_PSS_DATA             0x20
+
 #define ATLAS_PH_INT_TIME_IN_US                450000
+#define ATLAS_EC_INT_TIME_IN_US                650000
+
+enum {
+       ATLAS_PH_SM,
+       ATLAS_EC_SM,
+};
 
 struct atlas_data {
        struct i2c_client *client;
        struct iio_trigger *trig;
+       struct atlas_device *chip;
        struct regmap *regmap;
        struct irq_work work;
 
-       __be32 buffer[4]; /* 32-bit pH data + 32-bit pad + 64-bit timestamp */
+       __be32 buffer[6]; /* 96-bit data + 32-bit pad + 64-bit timestamp */
 };
 
 static const struct regmap_range atlas_volatile_ranges[] = {
        regmap_reg_range(ATLAS_REG_INT_CONTROL, ATLAS_REG_INT_CONTROL),
        regmap_reg_range(ATLAS_REG_PH_DATA, ATLAS_REG_PH_DATA + 4),
+       regmap_reg_range(ATLAS_REG_EC_DATA, ATLAS_REG_PSS_DATA + 4),
 };
 
 static const struct regmap_access_table atlas_volatile_table = {
@@ -80,13 +102,14 @@ static const struct regmap_config atlas_regmap_config = {
        .val_bits = 8,
 
        .volatile_table = &atlas_volatile_table,
-       .max_register = ATLAS_REG_PH_DATA + 4,
+       .max_register = ATLAS_REG_PSS_DATA + 4,
        .cache_type = REGCACHE_RBTREE,
 };
 
-static const struct iio_chan_spec atlas_channels[] = {
+static const struct iio_chan_spec atlas_ph_channels[] = {
        {
                .type = IIO_PH,
+               .address = ATLAS_REG_PH_DATA,
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
                .scan_index = 0,
@@ -100,7 +123,7 @@ static const struct iio_chan_spec atlas_channels[] = {
        IIO_CHAN_SOFT_TIMESTAMP(1),
        {
                .type = IIO_TEMP,
-               .address = ATLAS_REG_TEMP_DATA,
+               .address = ATLAS_REG_PH_TEMP_DATA,
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
                .output = 1,
@@ -108,6 +131,142 @@ static const struct iio_chan_spec atlas_channels[] = {
        },
 };
 
+#define ATLAS_EC_CHANNEL(_idx, _addr) \
+       {\
+               .type = IIO_CONCENTRATION, \
+               .indexed = 1, \
+               .channel = _idx, \
+               .address = _addr, \
+               .info_mask_separate = \
+                       BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), \
+               .scan_index = _idx + 1, \
+               .scan_type = { \
+                       .sign = 'u', \
+                       .realbits = 32, \
+                       .storagebits = 32, \
+                       .endianness = IIO_BE, \
+               }, \
+       }
+
+static const struct iio_chan_spec atlas_ec_channels[] = {
+       {
+               .type = IIO_ELECTRICALCONDUCTIVITY,
+               .address = ATLAS_REG_EC_DATA,
+               .info_mask_separate =
+                       BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+               .scan_index = 0,
+               .scan_type = {
+                       .sign = 'u',
+                       .realbits = 32,
+                       .storagebits = 32,
+                       .endianness = IIO_BE,
+               },
+       },
+       ATLAS_EC_CHANNEL(0, ATLAS_REG_TDS_DATA),
+       ATLAS_EC_CHANNEL(1, ATLAS_REG_PSS_DATA),
+       IIO_CHAN_SOFT_TIMESTAMP(3),
+       {
+               .type = IIO_TEMP,
+               .address = ATLAS_REG_EC_TEMP_DATA,
+               .info_mask_separate =
+                       BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+               .output = 1,
+               .scan_index = -1
+       },
+};
+
+static int atlas_check_ph_calibration(struct atlas_data *data)
+{
+       struct device *dev = &data->client->dev;
+       int ret;
+       unsigned int val;
+
+       ret = regmap_read(data->regmap, ATLAS_REG_PH_CALIB_STATUS, &val);
+       if (ret)
+               return ret;
+
+       if (!(val & ATLAS_REG_PH_CALIB_STATUS_MASK)) {
+               dev_warn(dev, "device has not been calibrated\n");
+               return 0;
+       }
+
+       if (!(val & ATLAS_REG_PH_CALIB_STATUS_LOW))
+               dev_warn(dev, "device missing low point calibration\n");
+
+       if (!(val & ATLAS_REG_PH_CALIB_STATUS_MID))
+               dev_warn(dev, "device missing mid point calibration\n");
+
+       if (!(val & ATLAS_REG_PH_CALIB_STATUS_HIGH))
+               dev_warn(dev, "device missing high point calibration\n");
+
+       return 0;
+}
+
+static int atlas_check_ec_calibration(struct atlas_data *data)
+{
+       struct device *dev = &data->client->dev;
+       int ret;
+       unsigned int val;
+
+       ret = regmap_bulk_read(data->regmap, ATLAS_REG_EC_PROBE, &val, 2);
+       if (ret)
+               return ret;
+
+       dev_info(dev, "probe set to K = %d.%.2d", be16_to_cpu(val) / 100,
+                                                be16_to_cpu(val) % 100);
+
+       ret = regmap_read(data->regmap, ATLAS_REG_EC_CALIB_STATUS, &val);
+       if (ret)
+               return ret;
+
+       if (!(val & ATLAS_REG_EC_CALIB_STATUS_MASK)) {
+               dev_warn(dev, "device has not been calibrated\n");
+               return 0;
+       }
+
+       if (!(val & ATLAS_REG_EC_CALIB_STATUS_DRY))
+               dev_warn(dev, "device missing dry point calibration\n");
+
+       if (val & ATLAS_REG_EC_CALIB_STATUS_SINGLE) {
+               dev_warn(dev, "device using single point calibration\n");
+       } else {
+               if (!(val & ATLAS_REG_EC_CALIB_STATUS_LOW))
+                       dev_warn(dev, "device missing low point calibration\n");
+
+               if (!(val & ATLAS_REG_EC_CALIB_STATUS_HIGH))
+                       dev_warn(dev, "device missing high point calibration\n");
+       }
+
+       return 0;
+}
+
+struct atlas_device {
+       const struct iio_chan_spec *channels;
+       int num_channels;
+       int data_reg;
+
+       int (*calibration)(struct atlas_data *data);
+       int delay;
+};
+
+static struct atlas_device atlas_devices[] = {
+       [ATLAS_PH_SM] = {
+                               .channels = atlas_ph_channels,
+                               .num_channels = 3,
+                               .data_reg = ATLAS_REG_PH_DATA,
+                               .calibration = &atlas_check_ph_calibration,
+                               .delay = ATLAS_PH_INT_TIME_IN_US,
+       },
+       [ATLAS_EC_SM] = {
+                               .channels = atlas_ec_channels,
+                               .num_channels = 5,
+                               .data_reg = ATLAS_REG_EC_DATA,
+                               .calibration = &atlas_check_ec_calibration,
+                               .delay = ATLAS_EC_INT_TIME_IN_US,
+       },
+
+};
+
 static int atlas_set_powermode(struct atlas_data *data, int on)
 {
        return regmap_write(data->regmap, ATLAS_REG_PWR_CONTROL, on);
@@ -178,12 +337,13 @@ static irqreturn_t atlas_trigger_handler(int irq, void *private)
        struct atlas_data *data = iio_priv(indio_dev);
        int ret;
 
-       ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
-                             (u8 *) &data->buffer, sizeof(data->buffer[0]));
+       ret = regmap_bulk_read(data->regmap, data->chip->data_reg,
+                             (u8 *) &data->buffer,
+                             sizeof(__be32) * (data->chip->num_channels - 2));
 
        if (!ret)
                iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
-                               iio_get_time_ns());
+                               iio_get_time_ns(indio_dev));
 
        iio_trigger_notify_done(indio_dev->trig);
 
@@ -200,7 +360,7 @@ static irqreturn_t atlas_interrupt_handler(int irq, void *private)
        return IRQ_HANDLED;
 }
 
-static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val)
+static int atlas_read_measurement(struct atlas_data *data, int reg, __be32 *val)
 {
        struct device *dev = &data->client->dev;
        int suspended = pm_runtime_suspended(dev);
@@ -213,11 +373,9 @@ static int atlas_read_ph_measurement(struct atlas_data *data, __be32 *val)
        }
 
        if (suspended)
-               usleep_range(ATLAS_PH_INT_TIME_IN_US,
-                            ATLAS_PH_INT_TIME_IN_US + 100000);
+               usleep_range(data->chip->delay, data->chip->delay + 100000);
 
-       ret = regmap_bulk_read(data->regmap, ATLAS_REG_PH_DATA,
-                             (u8 *) val, sizeof(*val));
+       ret = regmap_bulk_read(data->regmap, reg, (u8 *) val, sizeof(*val));
 
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
@@ -242,12 +400,15 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
                                              (u8 *) &reg, sizeof(reg));
                        break;
                case IIO_PH:
+               case IIO_CONCENTRATION:
+               case IIO_ELECTRICALCONDUCTIVITY:
                        mutex_lock(&indio_dev->mlock);
 
                        if (iio_buffer_enabled(indio_dev))
                                ret = -EBUSY;
                        else
-                               ret = atlas_read_ph_measurement(data, &reg);
+                               ret = atlas_read_measurement(data,
+                                                       chan->address, &reg);
 
                        mutex_unlock(&indio_dev->mlock);
                        break;
@@ -271,6 +432,14 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
                        *val = 1; /* 0.001 */
                        *val2 = 1000;
                        break;
+               case IIO_ELECTRICALCONDUCTIVITY:
+                       *val = 1; /* 0.00001 */
+                       *val2 = 100000;
+                       break;
+               case IIO_CONCENTRATION:
+                       *val = 0; /* 0.000000001 */
+                       *val2 = 1000;
+                       return IIO_VAL_INT_PLUS_NANO;
                default:
                        return -EINVAL;
                }
@@ -303,37 +472,26 @@ static const struct iio_info atlas_info = {
        .write_raw = atlas_write_raw,
 };
 
-static int atlas_check_calibration(struct atlas_data *data)
-{
-       struct device *dev = &data->client->dev;
-       int ret;
-       unsigned int val;
-
-       ret = regmap_read(data->regmap, ATLAS_REG_CALIB_STATUS, &val);
-       if (ret)
-               return ret;
-
-       if (!(val & ATLAS_REG_CALIB_STATUS_MASK)) {
-               dev_warn(dev, "device has not been calibrated\n");
-               return 0;
-       }
-
-       if (!(val & ATLAS_REG_CALIB_STATUS_LOW))
-               dev_warn(dev, "device missing low point calibration\n");
-
-       if (!(val & ATLAS_REG_CALIB_STATUS_MID))
-               dev_warn(dev, "device missing mid point calibration\n");
-
-       if (!(val & ATLAS_REG_CALIB_STATUS_HIGH))
-               dev_warn(dev, "device missing high point calibration\n");
+static const struct i2c_device_id atlas_id[] = {
+       { "atlas-ph-sm", ATLAS_PH_SM},
+       { "atlas-ec-sm", ATLAS_EC_SM},
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, atlas_id);
 
-       return 0;
+static const struct of_device_id atlas_dt_ids[] = {
+       { .compatible = "atlas,ph-sm", .data = (void *)ATLAS_PH_SM, },
+       { .compatible = "atlas,ec-sm", .data = (void *)ATLAS_EC_SM, },
+       { }
 };
+MODULE_DEVICE_TABLE(of, atlas_dt_ids);
 
 static int atlas_probe(struct i2c_client *client,
                       const struct i2c_device_id *id)
 {
        struct atlas_data *data;
+       struct atlas_device *chip;
+       const struct of_device_id *of_id;
        struct iio_trigger *trig;
        struct iio_dev *indio_dev;
        int ret;
@@ -342,10 +500,16 @@ static int atlas_probe(struct i2c_client *client,
        if (!indio_dev)
                return -ENOMEM;
 
+       of_id = of_match_device(atlas_dt_ids, &client->dev);
+       if (!of_id)
+               chip = &atlas_devices[id->driver_data];
+       else
+               chip = &atlas_devices[(unsigned long)of_id->data];
+
        indio_dev->info = &atlas_info;
        indio_dev->name = ATLAS_DRV_NAME;
-       indio_dev->channels = atlas_channels;
-       indio_dev->num_channels = ARRAY_SIZE(atlas_channels);
+       indio_dev->channels = chip->channels;
+       indio_dev->num_channels = chip->num_channels;
        indio_dev->modes = INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE;
        indio_dev->dev.parent = &client->dev;
 
@@ -358,6 +522,7 @@ static int atlas_probe(struct i2c_client *client,
        data = iio_priv(indio_dev);
        data->client = client;
        data->trig = trig;
+       data->chip = chip;
        trig->dev.parent = indio_dev->dev.parent;
        trig->ops = &atlas_interrupt_trigger_ops;
        iio_trigger_set_drvdata(trig, indio_dev);
@@ -379,7 +544,7 @@ static int atlas_probe(struct i2c_client *client,
                return -EINVAL;
        }
 
-       ret = atlas_check_calibration(data);
+       ret = chip->calibration(data);
        if (ret)
                return ret;
 
@@ -480,18 +645,6 @@ static const struct dev_pm_ops atlas_pm_ops = {
                           atlas_runtime_resume, NULL)
 };
 
-static const struct i2c_device_id atlas_id[] = {
-       { "atlas-ph-sm", 0 },
-       {}
-};
-MODULE_DEVICE_TABLE(i2c, atlas_id);
-
-static const struct of_device_id atlas_dt_ids[] = {
-       { .compatible = "atlas,ph-sm" },
-       { }
-};
-MODULE_DEVICE_TABLE(of, atlas_dt_ids);
-
 static struct i2c_driver atlas_driver = {
        .driver = {
                .name   = ATLAS_DRV_NAME,
index f1693dbebb8ac23359035786ef26b6607191058d..d06e728cea370172b4aa26829f9be5892cad747b 100644 (file)
 #include <linux/iio/common/st_sensors.h>
 
 
-int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
+static int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
 {
-       int i, len;
-       int total = 0;
+       int i;
        struct st_sensor_data *sdata = iio_priv(indio_dev);
        unsigned int num_data_channels = sdata->num_data_channels;
 
-       for (i = 0; i < num_data_channels; i++) {
-               unsigned int bytes_to_read;
-
-               if (test_bit(i, indio_dev->active_scan_mask)) {
-                       bytes_to_read = indio_dev->channels[i].scan_type.storagebits >> 3;
-                       len = sdata->tf->read_multiple_byte(&sdata->tb,
-                               sdata->dev, indio_dev->channels[i].address,
-                               bytes_to_read,
-                               buf + total, sdata->multiread_bit);
-
-                       if (len < bytes_to_read)
-                               return -EIO;
-
-                       /* Advance the buffer pointer */
-                       total += len;
-               }
+       for_each_set_bit(i, indio_dev->active_scan_mask, num_data_channels) {
+               const struct iio_chan_spec *channel = &indio_dev->channels[i];
+               unsigned int bytes_to_read = channel->scan_type.realbits >> 3;
+               unsigned int storage_bytes =
+                       channel->scan_type.storagebits >> 3;
+
+               buf = PTR_ALIGN(buf, storage_bytes);
+               if (sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
+                                                 channel->address,
+                                                 bytes_to_read, buf,
+                                                 sdata->multiread_bit) <
+                   bytes_to_read)
+                       return -EIO;
+
+               /* Advance the buffer pointer */
+               buf += storage_bytes;
        }
 
-       return total;
+       return 0;
 }
-EXPORT_SYMBOL(st_sensors_get_buffer_element);
 
 irqreturn_t st_sensors_trigger_handler(int irq, void *p)
 {
@@ -59,11 +57,16 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
        struct st_sensor_data *sdata = iio_priv(indio_dev);
        s64 timestamp;
 
-       /* If we do timetamping here, do it before reading the values */
+       /*
+        * If we do timestamping here, do it before reading the values, because
+        * once we've read the values, new interrupts can occur (when using
+        * the hardware trigger) and the hw_timestamp may get updated.
+        * By storing it in a local variable first, we are safe.
+        */
        if (sdata->hw_irq_trigger)
                timestamp = sdata->hw_timestamp;
        else
-               timestamp = iio_get_time_ns();
+               timestamp = iio_get_time_ns(indio_dev);
 
        len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
        if (len < 0)
index 9e59c90f6a8d7baa1ee091ea588b75a39414032c..2d5282e05482f7d3eaa9adce2f74ba098376ede3 100644 (file)
@@ -228,7 +228,7 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
 }
 EXPORT_SYMBOL(st_sensors_set_axis_enable);
 
-void st_sensors_power_enable(struct iio_dev *indio_dev)
+int st_sensors_power_enable(struct iio_dev *indio_dev)
 {
        struct st_sensor_data *pdata = iio_priv(indio_dev);
        int err;
@@ -237,18 +237,37 @@ void st_sensors_power_enable(struct iio_dev *indio_dev)
        pdata->vdd = devm_regulator_get_optional(indio_dev->dev.parent, "vdd");
        if (!IS_ERR(pdata->vdd)) {
                err = regulator_enable(pdata->vdd);
-               if (err != 0)
+               if (err != 0) {
                        dev_warn(&indio_dev->dev,
                                 "Failed to enable specified Vdd supply\n");
+                       return err;
+               }
+       } else {
+               err = PTR_ERR(pdata->vdd);
+               if (err != -ENODEV)
+                       return err;
        }
 
        pdata->vdd_io = devm_regulator_get_optional(indio_dev->dev.parent, "vddio");
        if (!IS_ERR(pdata->vdd_io)) {
                err = regulator_enable(pdata->vdd_io);
-               if (err != 0)
+               if (err != 0) {
                        dev_warn(&indio_dev->dev,
                                 "Failed to enable specified Vdd_IO supply\n");
+                       goto st_sensors_disable_vdd;
+               }
+       } else {
+               err = PTR_ERR(pdata->vdd_io);
+               if (err != -ENODEV)
+                       goto st_sensors_disable_vdd;
        }
+
+       return 0;
+
+st_sensors_disable_vdd:
+       if (!IS_ERR_OR_NULL(pdata->vdd))
+               regulator_disable(pdata->vdd);
+       return err;
 }
 EXPORT_SYMBOL(st_sensors_power_enable);
 
@@ -256,10 +275,10 @@ void st_sensors_power_disable(struct iio_dev *indio_dev)
 {
        struct st_sensor_data *pdata = iio_priv(indio_dev);
 
-       if (!IS_ERR(pdata->vdd))
+       if (!IS_ERR_OR_NULL(pdata->vdd))
                regulator_disable(pdata->vdd);
 
-       if (!IS_ERR(pdata->vdd_io))
+       if (!IS_ERR_OR_NULL(pdata->vdd_io))
                regulator_disable(pdata->vdd_io);
 }
 EXPORT_SYMBOL(st_sensors_power_disable);
@@ -471,7 +490,7 @@ static int st_sensors_read_axis_data(struct iio_dev *indio_dev,
        int err;
        u8 *outdata;
        struct st_sensor_data *sdata = iio_priv(indio_dev);
-       unsigned int byte_for_channel = ch->scan_type.storagebits >> 3;
+       unsigned int byte_for_channel = ch->scan_type.realbits >> 3;
 
        outdata = kmalloc(byte_for_channel, GFP_KERNEL);
        if (!outdata)
@@ -531,7 +550,7 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
                        int num_sensors_list,
                        const struct st_sensor_settings *sensor_settings)
 {
-       int i, n, err;
+       int i, n, err = 0;
        u8 wai;
        struct st_sensor_data *sdata = iio_priv(indio_dev);
 
@@ -551,17 +570,21 @@ int st_sensors_check_device_support(struct iio_dev *indio_dev,
                return -ENODEV;
        }
 
-       err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-                                       sensor_settings[i].wai_addr, &wai);
-       if (err < 0) {
-               dev_err(&indio_dev->dev, "failed to read Who-Am-I register.\n");
-               return err;
-       }
+       if (sensor_settings[i].wai_addr) {
+               err = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+                                          sensor_settings[i].wai_addr, &wai);
+               if (err < 0) {
+                       dev_err(&indio_dev->dev,
+                               "failed to read Who-Am-I register.\n");
+                       return err;
+               }
 
-       if (sensor_settings[i].wai != wai) {
-               dev_err(&indio_dev->dev, "%s: WhoAmI mismatch (0x%x).\n",
-                                               indio_dev->name, wai);
-               return -EINVAL;
+               if (sensor_settings[i].wai != wai) {
+                       dev_err(&indio_dev->dev,
+                               "%s: WhoAmI mismatch (0x%x).\n",
+                               indio_dev->name, wai);
+                       return -EINVAL;
+               }
        }
 
        sdata->sensor_settings =
index 98cfee296d4622f92119b44af2fd3b1b0e59f8fc..b43aa36031f88a318469882ae011f159408bacf0 100644 (file)
@@ -48,8 +48,8 @@ static int st_sensors_i2c_read_multiple_byte(
        if (multiread_bit)
                reg_addr |= ST_SENSORS_I2C_MULTIREAD;
 
-       return i2c_smbus_read_i2c_block_data(to_i2c_client(dev),
-                                                       reg_addr, len, data);
+       return i2c_smbus_read_i2c_block_data_or_emulated(to_i2c_client(dev),
+                                                        reg_addr, len, data);
 }
 
 static int st_sensors_i2c_write_byte(struct st_sensor_transfer_buffer *tb,
index 296e4ff19ae8d9cb4d40a0a852ecdecefb2e127f..e66f12ee8a5575005745141bca46c8755ae8750c 100644 (file)
 #include <linux/iio/common/st_sensors.h>
 #include "st_sensors_core.h"
 
+/**
+ * st_sensors_new_samples_available() - check if more samples came in
+ * returns:
+ * 0 - no new samples available
+ * 1 - new samples available
+ * negative - error or unknown
+ */
+static int st_sensors_new_samples_available(struct iio_dev *indio_dev,
+                                           struct st_sensor_data *sdata)
+{
+       u8 status;
+       int ret;
+
+       /* How would I know if I can't check it? */
+       if (!sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+               return -EINVAL;
+
+       /* No scan mask, no interrupt */
+       if (!indio_dev->active_scan_mask)
+               return 0;
+
+       ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+                       sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+                       &status);
+       if (ret < 0) {
+               dev_err(sdata->dev,
+                       "error checking samples available\n");
+               return ret;
+       }
+       /*
+        * the lower bits of .active_scan_mask[0] are directly mapped
+        * to the channels on the sensor: either bit 0 for
+        * one-dimensional sensors, or e.g. x,y,z for accelerometers,
+        * gyroscopes or magnetometers. No sensor uses more than 3
+        * channels, so cut the other status bits here.
+        */
+       status &= 0x07;
+
+       if (status & (u8)indio_dev->active_scan_mask[0])
+               return 1;
+
+       return 0;
+}
+
 /**
  * st_sensors_irq_handler() - top half of the IRQ-based triggers
  * @irq: irq number
@@ -29,7 +73,7 @@ irqreturn_t st_sensors_irq_handler(int irq, void *p)
        struct st_sensor_data *sdata = iio_priv(indio_dev);
 
        /* Get the time stamp as close in time as possible */
-       sdata->hw_timestamp = iio_get_time_ns();
+       sdata->hw_timestamp = iio_get_time_ns(indio_dev);
        return IRQ_WAKE_THREAD;
 }
 
@@ -43,44 +87,43 @@ irqreturn_t st_sensors_irq_thread(int irq, void *p)
        struct iio_trigger *trig = p;
        struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
        struct st_sensor_data *sdata = iio_priv(indio_dev);
-       int ret;
 
        /*
         * If this trigger is backed by a hardware interrupt and we have a
-        * status register, check if this IRQ came from us
+        * status register, check if this IRQ came from us. Notice that
+        * we will process also if st_sensors_new_samples_available()
+        * returns negative: if we can't check status, then poll
+        * unconditionally.
         */
-       if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
-               u8 status;
-
-               ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-                          sdata->sensor_settings->drdy_irq.addr_stat_drdy,
-                          &status);
-               if (ret < 0) {
-                       dev_err(sdata->dev, "could not read channel status\n");
-                       goto out_poll;
-               }
-               /*
-                * the lower bits of .active_scan_mask[0] is directly mapped
-                * to the channels on the sensor: either bit 0 for
-                * one-dimensional sensors, or e.g. x,y,z for accelerometers,
-                * gyroscopes or magnetometers. No sensor use more than 3
-                * channels, so cut the other status bits here.
-                */
-               status &= 0x07;
+       if (sdata->hw_irq_trigger &&
+           st_sensors_new_samples_available(indio_dev, sdata)) {
+               iio_trigger_poll_chained(p);
+       } else {
+               dev_dbg(sdata->dev, "spurious IRQ\n");
+               return IRQ_NONE;
+       }
 
-               /*
-                * If this was not caused by any channels on this sensor,
-                * return IRQ_NONE
-                */
-               if (!indio_dev->active_scan_mask)
-                       return IRQ_NONE;
-               if (!(status & (u8)indio_dev->active_scan_mask[0]))
-                       return IRQ_NONE;
+       /*
+        * If we have proper level IRQs the handler will be re-entered if
+        * the line is still active, so return here and come back in through
+        * the top half if need be.
+        */
+       if (!sdata->edge_irq)
+               return IRQ_HANDLED;
+
+       /*
+        * If we are using edge IRQs, new samples arrived while processing
+        * the IRQ and those may be missed unless we pick them here, so poll
+        * again. If the sensor delivery frequency is very high, this thread
+        * turns into a polled loop handler.
+        */
+       while (sdata->hw_irq_trigger &&
+              st_sensors_new_samples_available(indio_dev, sdata)) {
+               dev_dbg(sdata->dev, "more samples came in during polling\n");
+               sdata->hw_timestamp = iio_get_time_ns(indio_dev);
+               iio_trigger_poll_chained(p);
        }
 
-out_poll:
-       /* It's our IRQ: proceed to handle the register polling */
-       iio_trigger_poll_chained(p);
        return IRQ_HANDLED;
 }
 
@@ -107,13 +150,18 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
         * If the IRQ is triggered on falling edge, we need to mark the
         * interrupt as active low, if the hardware supports this.
         */
-       if (irq_trig == IRQF_TRIGGER_FALLING) {
+       switch(irq_trig) {
+       case IRQF_TRIGGER_FALLING:
+       case IRQF_TRIGGER_LOW:
                if (!sdata->sensor_settings->drdy_irq.addr_ihl) {
                        dev_err(&indio_dev->dev,
-                               "falling edge specified for IRQ but hardware "
-                               "only support rising edge, will request "
-                               "rising edge\n");
-                       irq_trig = IRQF_TRIGGER_RISING;
+                               "falling/low specified for IRQ "
+                               "but hardware only support rising/high: "
+                               "will request rising/high\n");
+                       if (irq_trig == IRQF_TRIGGER_FALLING)
+                               irq_trig = IRQF_TRIGGER_RISING;
+                       if (irq_trig == IRQF_TRIGGER_LOW)
+                               irq_trig = IRQF_TRIGGER_HIGH;
                } else {
                        /* Set up INT active low i.e. falling edge */
                        err = st_sensors_write_data_with_mask(indio_dev,
@@ -122,20 +170,39 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                        if (err < 0)
                                goto iio_trigger_free;
                        dev_info(&indio_dev->dev,
-                                "interrupts on the falling edge\n");
+                                "interrupts on the falling edge or "
+                                "active low level\n");
                }
-       } else if (irq_trig == IRQF_TRIGGER_RISING) {
+               break;
+       case IRQF_TRIGGER_RISING:
                dev_info(&indio_dev->dev,
                         "interrupts on the rising edge\n");
-
-       } else {
+               break;
+       case IRQF_TRIGGER_HIGH:
+               dev_info(&indio_dev->dev,
+                        "interrupts active high level\n");
+               break;
+       default:
+               /* This is the most preferred mode, if possible */
                dev_err(&indio_dev->dev,
-               "unsupported IRQ trigger specified (%lx), only "
-                       "rising and falling edges supported, enforce "
+                       "unsupported IRQ trigger specified (%lx), enforce "
                        "rising edge\n", irq_trig);
                irq_trig = IRQF_TRIGGER_RISING;
        }
 
+       /* Tell the interrupt handler that we're dealing with edges */
+       if (irq_trig == IRQF_TRIGGER_FALLING ||
+           irq_trig == IRQF_TRIGGER_RISING)
+               sdata->edge_irq = true;
+       else
+               /*
+                * If we're not using edges (i.e. level interrupts) we
+                * just mask off the IRQ, handle one interrupt, then
+                * if the line is still low, we return to the
+                * interrupt handler top half again and start over.
+                */
+               irq_trig |= IRQF_ONESHOT;
+
        /*
         * If the interrupt pin is Open Drain, by definition this
         * means that the interrupt line may be shared with other
@@ -148,9 +215,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
            sdata->sensor_settings->drdy_irq.addr_stat_drdy)
                irq_trig |= IRQF_SHARED;
 
-       /* Let's create an interrupt thread masking the hard IRQ here */
-       irq_trig |= IRQF_ONESHOT;
-
        err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
                        st_sensors_irq_handler,
                        st_sensors_irq_thread,
index f7c71da42f15e28127492c8894a856a3a04da0e3..ca814479fadfa0773fd7bb580690b91b745d5349 100644 (file)
@@ -248,11 +248,12 @@ config MCP4922
 config STX104
        tristate "Apex Embedded Systems STX104 DAC driver"
        depends on X86 && ISA_BUS_API
+       select GPIOLIB
        help
-         Say yes here to build support for the 2-channel DAC on the Apex
-         Embedded Systems STX104 integrated analog PC/104 card. The base port
-         addresses for the devices may be configured via the "base" module
-         parameter array.
+         Say yes here to build support for the 2-channel DAC and GPIO on the
+         Apex Embedded Systems STX104 integrated analog PC/104 card. The base
+         port addresses for the devices may be configured via the base array
+         module parameter.
 
 config VF610_DAC
        tristate "Vybrid vf610 DAC driver"
index 968712be967f44bc73d5d34f04edbcc492f69589..559061ab1982d03fce3b38400efc34234bb47ca2 100644 (file)
@@ -242,7 +242,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
                                        0,
                                        IIO_EV_TYPE_THRESH,
                                        IIO_EV_DIR_RISING),
-                       iio_get_time_ns());
+                       iio_get_time_ns(indio_dev));
                }
 
                if (events & AD5421_FAULT_UNDER_CURRENT) {
@@ -251,7 +251,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
                                        0,
                                        IIO_EV_TYPE_THRESH,
                                        IIO_EV_DIR_FALLING),
-                               iio_get_time_ns());
+                               iio_get_time_ns(indio_dev));
                }
 
                if (events & AD5421_FAULT_TEMP_OVER_140) {
@@ -260,7 +260,7 @@ static irqreturn_t ad5421_fault_handler(int irq, void *data)
                                        0,
                                        IIO_EV_TYPE_MAG,
                                        IIO_EV_DIR_RISING),
-                               iio_get_time_ns());
+                               iio_get_time_ns(indio_dev));
                }
 
                old_fault = fault;
index 4e4c20d6d8b57a763bb043eaf47af0d76709204c..788b3d6fd1cc973ae398baeaf5b4a88e2207186c 100644 (file)
@@ -223,7 +223,7 @@ static irqreturn_t ad5504_event_handler(int irq, void *private)
                                            0,
                                            IIO_EV_TYPE_THRESH,
                                            IIO_EV_DIR_RISING),
-                      iio_get_time_ns());
+                      iio_get_time_ns((struct iio_dev *)private));
 
        return IRQ_HANDLED;
 }
index bfb350a85a16dac01b7ce25984bea3242eafbfb2..0fde593ec0d93bcc53b3f255ccc2b1db75198a43 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
 #include <linux/platform_data/ad5755.h>
@@ -109,6 +110,51 @@ enum ad5755_type {
        ID_AD5737,
 };
 
+#ifdef CONFIG_OF
+static const int ad5755_dcdc_freq_table[][2] = {
+       { 250000, AD5755_DC_DC_FREQ_250kHZ },
+       { 410000, AD5755_DC_DC_FREQ_410kHZ },
+       { 650000, AD5755_DC_DC_FREQ_650kHZ }
+};
+
+static const int ad5755_dcdc_maxv_table[][2] = {
+       { 23000000, AD5755_DC_DC_MAXV_23V },
+       { 24500000, AD5755_DC_DC_MAXV_24V5 },
+       { 27000000, AD5755_DC_DC_MAXV_27V },
+       { 29500000, AD5755_DC_DC_MAXV_29V5 },
+};
+
+static const int ad5755_slew_rate_table[][2] = {
+       { 64000, AD5755_SLEW_RATE_64k },
+       { 32000, AD5755_SLEW_RATE_32k },
+       { 16000, AD5755_SLEW_RATE_16k },
+       { 8000, AD5755_SLEW_RATE_8k },
+       { 4000, AD5755_SLEW_RATE_4k },
+       { 2000, AD5755_SLEW_RATE_2k },
+       { 1000, AD5755_SLEW_RATE_1k },
+       { 500, AD5755_SLEW_RATE_500 },
+       { 250, AD5755_SLEW_RATE_250 },
+       { 125, AD5755_SLEW_RATE_125 },
+       { 64, AD5755_SLEW_RATE_64 },
+       { 32, AD5755_SLEW_RATE_32 },
+       { 16, AD5755_SLEW_RATE_16 },
+       { 8, AD5755_SLEW_RATE_8 },
+       { 4, AD5755_SLEW_RATE_4 },
+       { 0, AD5755_SLEW_RATE_0_5 },
+};
+
+static const int ad5755_slew_step_table[][2] = {
+       { 256, AD5755_SLEW_STEP_SIZE_256 },
+       { 128, AD5755_SLEW_STEP_SIZE_128 },
+       { 64, AD5755_SLEW_STEP_SIZE_64 },
+       { 32, AD5755_SLEW_STEP_SIZE_32 },
+       { 16, AD5755_SLEW_STEP_SIZE_16 },
+       { 4, AD5755_SLEW_STEP_SIZE_4 },
+       { 2, AD5755_SLEW_STEP_SIZE_2 },
+       { 1, AD5755_SLEW_STEP_SIZE_1 },
+};
+#endif
+
 static int ad5755_write_unlocked(struct iio_dev *indio_dev,
        unsigned int reg, unsigned int val)
 {
@@ -556,6 +602,129 @@ static const struct ad5755_platform_data ad5755_default_pdata = {
        },
 };
 
+#ifdef CONFIG_OF
+static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
+{
+       struct device_node *np = dev->of_node;
+       struct device_node *pp;
+       struct ad5755_platform_data *pdata;
+       unsigned int tmp;
+       unsigned int tmparray[3];
+       int devnr, i;
+
+       pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return NULL;
+
+       pdata->ext_dc_dc_compenstation_resistor =
+           of_property_read_bool(np, "adi,ext-dc-dc-compenstation-resistor");
+
+       if (!of_property_read_u32(np, "adi,dc-dc-phase", &tmp))
+               pdata->dc_dc_phase = tmp;
+       else
+               pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE;
+
+       pdata->dc_dc_freq = AD5755_DC_DC_FREQ_410kHZ;
+       if (!of_property_read_u32(np, "adi,dc-dc-freq-hz", &tmp)) {
+               for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_freq_table); i++) {
+                       if (tmp == ad5755_dcdc_freq_table[i][0]) {
+                               pdata->dc_dc_freq = ad5755_dcdc_freq_table[i][1];
+                               break;
+                       }
+               }
+
+               if (i == ARRAY_SIZE(ad5755_dcdc_freq_table)) {
+                       dev_err(dev,
+                               "adi,dc-dc-freq out of range selecting 410kHz");
+               }
+       }
+
+       pdata->dc_dc_maxv = AD5755_DC_DC_MAXV_23V;
+       if (!of_property_read_u32(np, "adi,dc-dc-max-microvolt", &tmp)) {
+               for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_maxv_table); i++) {
+                       if (tmp == ad5755_dcdc_maxv_table[i][0]) {
+                               pdata->dc_dc_maxv = ad5755_dcdc_maxv_table[i][1];
+                               break;
+                       }
+               }
+               if (i == ARRAY_SIZE(ad5755_dcdc_maxv_table)) {
+                               dev_err(dev,
+                                       "adi,dc-dc-maxv out of range selecting 23V");
+               }
+       }
+
+       devnr = 0;
+       for_each_child_of_node(np, pp) {
+               if (devnr > AD5755_NUM_CHANNELS) {
+                       dev_err(dev,
+                               "There is to many channels defined in DT\n");
+                       goto error_out;
+               }
+
+               if (!of_property_read_u32(pp, "adi,mode", &tmp))
+                       pdata->dac[devnr].mode = tmp;
+               else
+                       pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA;
+
+               pdata->dac[devnr].ext_current_sense_resistor =
+                   of_property_read_bool(pp, "adi,ext-current-sense-resistor");
+
+               pdata->dac[devnr].enable_voltage_overrange =
+                   of_property_read_bool(pp, "adi,enable-voltage-overrange");
+
+               if (!of_property_read_u32_array(pp, "adi,slew", tmparray, 3)) {
+                       pdata->dac[devnr].slew.enable = tmparray[0];
+
+                       pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k;
+                       for (i = 0; i < ARRAY_SIZE(ad5755_slew_rate_table); i++) {
+                               if (tmparray[1] == ad5755_slew_rate_table[i][0]) {
+                                       pdata->dac[devnr].slew.rate =
+                                               ad5755_slew_rate_table[i][1];
+                                       break;
+                               }
+                       }
+                       if (i == ARRAY_SIZE(ad5755_slew_rate_table)) {
+                               dev_err(dev,
+                                       "channel %d slew rate out of range selecting 64kHz",
+                                       devnr);
+                       }
+
+                       pdata->dac[devnr].slew.step_size = AD5755_SLEW_STEP_SIZE_1;
+                       for (i = 0; i < ARRAY_SIZE(ad5755_slew_step_table); i++) {
+                               if (tmparray[2] == ad5755_slew_step_table[i][0]) {
+                                       pdata->dac[devnr].slew.step_size =
+                                               ad5755_slew_step_table[i][1];
+                                       break;
+                               }
+                       }
+                       if (i == ARRAY_SIZE(ad5755_slew_step_table)) {
+                               dev_err(dev,
+                                       "channel %d slew step size out of range selecting 1 LSB",
+                                       devnr);
+                       }
+               } else {
+                       pdata->dac[devnr].slew.enable = false;
+                       pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k;
+                       pdata->dac[devnr].slew.step_size =
+                           AD5755_SLEW_STEP_SIZE_1;
+               }
+               devnr++;
+       }
+
+       return pdata;
+
+ error_out:
+       devm_kfree(dev, pdata);
+       return NULL;
+}
+#else
+static
+struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
+{
+       return NULL;
+}
+#endif
+
 static int ad5755_probe(struct spi_device *spi)
 {
        enum ad5755_type type = spi_get_device_id(spi)->driver_data;
@@ -583,8 +752,15 @@ static int ad5755_probe(struct spi_device *spi)
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->num_channels = AD5755_NUM_CHANNELS;
 
-       if (!pdata)
+       if (spi->dev.of_node)
+               pdata = ad5755_parse_dt(&spi->dev);
+       else
+               pdata = spi->dev.platform_data;
+
+       if (!pdata) {
+               dev_warn(&spi->dev, "no platform data? using default\n");
                pdata = &ad5755_default_pdata;
+       }
 
        ret = ad5755_init_channels(indio_dev, pdata);
        if (ret)
@@ -607,6 +783,16 @@ static const struct spi_device_id ad5755_id[] = {
 };
 MODULE_DEVICE_TABLE(spi, ad5755_id);
 
+static const struct of_device_id ad5755_of_match[] = {
+       { .compatible = "adi,ad5755" },
+       { .compatible = "adi,ad5755-1" },
+       { .compatible = "adi,ad5757" },
+       { .compatible = "adi,ad5735" },
+       { .compatible = "adi,ad5737" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ad5755_of_match);
+
 static struct spi_driver ad5755_driver = {
        .driver = {
                .name = "ad5755",
index 27941220872f6f7fd1c76c8a40030948395c553f..792a97164cb28e04dd25d44cdffd8541b05f19c4 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/bitops.h>
 #include <linux/device.h>
 #include <linux/errno.h>
+#include <linux/gpio/driver.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/types.h>
 #include <linux/io.h>
@@ -21,6 +22,7 @@
 #include <linux/isa.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
+#include <linux/spinlock.h>
 
 #define STX104_NUM_CHAN 2
 
@@ -49,6 +51,20 @@ struct stx104_iio {
        unsigned base;
 };
 
+/**
+ * struct stx104_gpio - GPIO device private data structure
+ * @chip:      instance of the gpio_chip
+ * @lock:      synchronization lock to prevent I/O race conditions
+ * @base:      base port address of the GPIO device
+ * @out_state: output bits state
+ */
+struct stx104_gpio {
+       struct gpio_chip chip;
+       spinlock_t lock;
+       unsigned int base;
+       unsigned int out_state;
+};
+
 static int stx104_read_raw(struct iio_dev *indio_dev,
        struct iio_chan_spec const *chan, int *val, int *val2, long mask)
 {
@@ -88,15 +104,81 @@ static const struct iio_chan_spec stx104_channels[STX104_NUM_CHAN] = {
        STX104_CHAN(1)
 };
 
+static int stx104_gpio_get_direction(struct gpio_chip *chip,
+       unsigned int offset)
+{
+       if (offset < 4)
+               return 1;
+
+       return 0;
+}
+
+static int stx104_gpio_direction_input(struct gpio_chip *chip,
+       unsigned int offset)
+{
+       if (offset >= 4)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int stx104_gpio_direction_output(struct gpio_chip *chip,
+       unsigned int offset, int value)
+{
+       if (offset < 4)
+               return -EINVAL;
+
+       chip->set(chip, offset, value);
+       return 0;
+}
+
+static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+       struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+
+       if (offset >= 4)
+               return -EINVAL;
+
+       return !!(inb(stx104gpio->base) & BIT(offset));
+}
+
+static void stx104_gpio_set(struct gpio_chip *chip, unsigned int offset,
+       int value)
+{
+       struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip);
+       const unsigned int mask = BIT(offset) >> 4;
+       unsigned long flags;
+
+       if (offset < 4)
+               return;
+
+       spin_lock_irqsave(&stx104gpio->lock, flags);
+
+       if (value)
+               stx104gpio->out_state |= mask;
+       else
+               stx104gpio->out_state &= ~mask;
+
+       outb(stx104gpio->out_state, stx104gpio->base);
+
+       spin_unlock_irqrestore(&stx104gpio->lock, flags);
+}
+
 static int stx104_probe(struct device *dev, unsigned int id)
 {
        struct iio_dev *indio_dev;
        struct stx104_iio *priv;
+       struct stx104_gpio *stx104gpio;
+       int err;
 
        indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
        if (!indio_dev)
                return -ENOMEM;
 
+       stx104gpio = devm_kzalloc(dev, sizeof(*stx104gpio), GFP_KERNEL);
+       if (!stx104gpio)
+               return -ENOMEM;
+
        if (!devm_request_region(dev, base[id], STX104_EXTENT,
                dev_name(dev))) {
                dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -117,14 +199,53 @@ static int stx104_probe(struct device *dev, unsigned int id)
        outw(0, base[id] + 4);
        outw(0, base[id] + 6);
 
-       return devm_iio_device_register(dev, indio_dev);
+       err = devm_iio_device_register(dev, indio_dev);
+       if (err) {
+               dev_err(dev, "IIO device registering failed (%d)\n", err);
+               return err;
+       }
+
+       stx104gpio->chip.label = dev_name(dev);
+       stx104gpio->chip.parent = dev;
+       stx104gpio->chip.owner = THIS_MODULE;
+       stx104gpio->chip.base = -1;
+       stx104gpio->chip.ngpio = 8;
+       stx104gpio->chip.get_direction = stx104_gpio_get_direction;
+       stx104gpio->chip.direction_input = stx104_gpio_direction_input;
+       stx104gpio->chip.direction_output = stx104_gpio_direction_output;
+       stx104gpio->chip.get = stx104_gpio_get;
+       stx104gpio->chip.set = stx104_gpio_set;
+       stx104gpio->base = base[id] + 3;
+       stx104gpio->out_state = 0x0;
+
+       spin_lock_init(&stx104gpio->lock);
+
+       dev_set_drvdata(dev, stx104gpio);
+
+       err = gpiochip_add_data(&stx104gpio->chip, stx104gpio);
+       if (err) {
+               dev_err(dev, "GPIO registering failed (%d)\n", err);
+               return err;
+       }
+
+       return 0;
+}
+
+static int stx104_remove(struct device *dev, unsigned int id)
+{
+       struct stx104_gpio *const stx104gpio = dev_get_drvdata(dev);
+
+       gpiochip_remove(&stx104gpio->chip);
+
+       return 0;
 }
 
 static struct isa_driver stx104_driver = {
        .probe = stx104_probe,
        .driver = {
                .name = "stx104"
-       }
+       },
+       .remove = stx104_remove
 };
 
 module_isa_driver(stx104_driver, num_stx104);
index 71805ced1aae393a1d5fc0a31aebdc9b2af0b69f..aa5824d96a4366a19e6875f499f4d437c5b51ba6 100644 (file)
@@ -10,6 +10,7 @@ config IIO_DUMMY_EVGEN
 
 config IIO_SIMPLE_DUMMY
        tristate "An example driver with no hardware requirements"
+       depends on IIO_SW_DEVICE
        help
         Driver intended mainly as documentation for how to write
         a driver. May also be useful for testing userspace code
index 43fe4ba7d0dcdac2f5d30666139eb2270001080f..ad3410e528b68fbc2cc281983da4a19921fa401b 100644 (file)
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/string.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
 #include <linux/iio/events.h>
 #include <linux/iio/buffer.h>
+#include <linux/iio/sw_device.h>
 #include "iio_simple_dummy.h"
 
-/*
- * A few elements needed to fake a bus for this driver
- * Note instances parameter controls how many of these
- * dummy devices are registered.
- */
-static unsigned instances = 1;
-module_param(instances, uint, 0);
-
-/* Pointer array used to fake bus elements */
-static struct iio_dev **iio_dummy_devs;
-
-/* Fake a name for the part number, usually obtained from the id table */
-static const char *iio_dummy_part_number = "iio_dummy_part_no";
+static struct config_item_type iio_dummy_type = {
+       .ct_owner = THIS_MODULE,
+};
 
 /**
  * struct iio_dummy_accel_calibscale - realworld to register mapping
@@ -572,12 +564,18 @@ static int iio_dummy_init_device(struct iio_dev *indio_dev)
  *                      const struct i2c_device_id *id)
  * SPI: iio_dummy_probe(struct spi_device *spi)
  */
-static int iio_dummy_probe(int index)
+static struct iio_sw_device *iio_dummy_probe(const char *name)
 {
        int ret;
        struct iio_dev *indio_dev;
        struct iio_dummy_state *st;
+       struct iio_sw_device *swd;
 
+       swd = kzalloc(sizeof(*swd), GFP_KERNEL);
+       if (!swd) {
+               ret = -ENOMEM;
+               goto error_kzalloc;
+       }
        /*
         * Allocate an IIO device.
         *
@@ -608,7 +606,7 @@ static int iio_dummy_probe(int index)
         * i2c_set_clientdata(client, indio_dev);
         * spi_set_drvdata(spi, indio_dev);
         */
-       iio_dummy_devs[index] = indio_dev;
+       swd->device = indio_dev;
 
        /*
         * Set the device name.
@@ -619,7 +617,7 @@ static int iio_dummy_probe(int index)
         *    indio_dev->name = id->name;
         *    indio_dev->name = spi_get_device_id(spi)->name;
         */
-       indio_dev->name = iio_dummy_part_number;
+       indio_dev->name = kstrdup(name, GFP_KERNEL);
 
        /* Provide description of available channels */
        indio_dev->channels = iio_dummy_channels;
@@ -646,7 +644,9 @@ static int iio_dummy_probe(int index)
        if (ret < 0)
                goto error_unconfigure_buffer;
 
-       return 0;
+       iio_swd_group_init_type_name(swd, name, &iio_dummy_type);
+
+       return swd;
 error_unconfigure_buffer:
        iio_simple_dummy_unconfigure_buffer(indio_dev);
 error_unregister_events:
@@ -654,16 +654,18 @@ error_unregister_events:
 error_free_device:
        iio_device_free(indio_dev);
 error_ret:
-       return ret;
+       kfree(swd);
+error_kzalloc:
+       return ERR_PTR(ret);
 }
 
 /**
  * iio_dummy_remove() - device instance removal function
- * @index: device index.
+ * @swd: pointer to software IIO device abstraction
  *
  * Parameters follow those of iio_dummy_probe for buses.
  */
-static void iio_dummy_remove(int index)
+static int iio_dummy_remove(struct iio_sw_device *swd)
 {
        /*
         * Get a pointer to the device instance iio_dev structure
@@ -671,7 +673,7 @@ static void iio_dummy_remove(int index)
         * struct iio_dev *indio_dev = i2c_get_clientdata(client);
         * struct iio_dev *indio_dev = spi_get_drvdata(spi);
         */
-       struct iio_dev *indio_dev = iio_dummy_devs[index];
+       struct iio_dev *indio_dev = swd->device;
 
        /* Unregister the device */
        iio_device_unregister(indio_dev);
@@ -684,11 +686,13 @@ static void iio_dummy_remove(int index)
        iio_simple_dummy_events_unregister(indio_dev);
 
        /* Free all structures */
+       kfree(indio_dev->name);
        iio_device_free(indio_dev);
-}
 
+       return 0;
+}
 /**
- * iio_dummy_init() -  device driver registration
+ * module_iio_sw_device_driver() -  device driver registration
  *
  * Varies depending on bus type of the device. As there is no device
  * here, call probe directly. For information on device registration
@@ -697,50 +701,18 @@ static void iio_dummy_remove(int index)
  * spi:
  * Documentation/spi/spi-summary
  */
-static __init int iio_dummy_init(void)
-{
-       int i, ret;
-
-       if (instances > 10) {
-               instances = 1;
-               return -EINVAL;
-       }
-
-       /* Fake a bus */
-       iio_dummy_devs = kcalloc(instances, sizeof(*iio_dummy_devs),
-                                GFP_KERNEL);
-       /* Here we have no actual device so call probe */
-       for (i = 0; i < instances; i++) {
-               ret = iio_dummy_probe(i);
-               if (ret < 0)
-                       goto error_remove_devs;
-       }
-       return 0;
-
-error_remove_devs:
-       while (i--)
-               iio_dummy_remove(i);
-
-       kfree(iio_dummy_devs);
-       return ret;
-}
-module_init(iio_dummy_init);
+static const struct iio_sw_device_ops iio_dummy_device_ops = {
+       .probe = iio_dummy_probe,
+       .remove = iio_dummy_remove,
+};
 
-/**
- * iio_dummy_exit() - device driver removal
- *
- * Varies depending on bus type of the device.
- * As there is no device here, call remove directly.
- */
-static __exit void iio_dummy_exit(void)
-{
-       int i;
+static struct iio_sw_device_type iio_dummy_device = {
+       .name = "dummy",
+       .owner = THIS_MODULE,
+       .ops = &iio_dummy_device_ops,
+};
 
-       for (i = 0; i < instances; i++)
-               iio_dummy_remove(i);
-       kfree(iio_dummy_devs);
-}
-module_exit(iio_dummy_exit);
+module_iio_sw_device_driver(iio_dummy_device);
 
 MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
 MODULE_DESCRIPTION("IIO dummy driver");
index cf44a6f7943118bab29c335edd807cb4f186e7b8..b383892a519372c21fd3db1e437a87e39359b88a 100644 (file)
@@ -85,7 +85,8 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
                }
        }
 
-       iio_push_to_buffers_with_timestamp(indio_dev, data, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, data,
+                                          iio_get_time_ns(indio_dev));
 
        kfree(data);
 
index 6eb600ff70569ef6ceb902afcc49ddce08b09953..ed63ffd849f8201505499d2be4594572b09b535d 100644 (file)
@@ -158,7 +158,7 @@ static irqreturn_t iio_simple_dummy_get_timestamp(int irq, void *private)
        struct iio_dev *indio_dev = private;
        struct iio_dummy_state *st = iio_priv(indio_dev);
 
-       st->event_timestamp = iio_get_time_ns();
+       st->event_timestamp = iio_get_time_ns(indio_dev);
        return IRQ_WAKE_THREAD;
 }
 
index 7ccc044063f65e3429f170571d318b026be9f59d..f7fcfa886f72181e36b89b38daa327dbf8d17cd0 100644 (file)
@@ -50,6 +50,7 @@
 #define BMG160_REG_PMU_BW              0x10
 #define BMG160_NO_FILTER               0
 #define BMG160_DEF_BW                  100
+#define BMG160_REG_PMU_BW_RES          BIT(7)
 
 #define BMG160_REG_INT_MAP_0           0x17
 #define BMG160_INT_MAP_0_BIT_ANY       BIT(1)
@@ -100,7 +101,6 @@ struct bmg160_data {
        struct iio_trigger *motion_trig;
        struct mutex mutex;
        s16 buffer[8];
-       u8 bw_bits;
        u32 dps_range;
        int ev_enable_state;
        int slope_thres;
@@ -117,13 +117,16 @@ enum bmg160_axis {
 };
 
 static const struct {
-       int val;
+       int odr;
+       int filter;
        int bw_bits;
-} bmg160_samp_freq_table[] = { {100, 0x07},
-                              {200, 0x06},
-                              {400, 0x03},
-                              {1000, 0x02},
-                              {2000, 0x01} };
+} bmg160_samp_freq_table[] = { {100, 32, 0x07},
+                              {200, 64, 0x06},
+                              {100, 12, 0x05},
+                              {200, 23, 0x04},
+                              {400, 47, 0x03},
+                              {1000, 116, 0x02},
+                              {2000, 230, 0x01} };
 
 static const struct {
        int scale;
@@ -153,7 +156,7 @@ static int bmg160_convert_freq_to_bit(int val)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
-               if (bmg160_samp_freq_table[i].val == val)
+               if (bmg160_samp_freq_table[i].odr == val)
                        return bmg160_samp_freq_table[i].bw_bits;
        }
 
@@ -176,7 +179,53 @@ static int bmg160_set_bw(struct bmg160_data *data, int val)
                return ret;
        }
 
-       data->bw_bits = bw_bits;
+       return 0;
+}
+
+static int bmg160_get_filter(struct bmg160_data *data, int *val)
+{
+       struct device *dev = regmap_get_device(data->regmap);
+       int ret;
+       int i;
+       unsigned int bw_bits;
+
+       ret = regmap_read(data->regmap, BMG160_REG_PMU_BW, &bw_bits);
+       if (ret < 0) {
+               dev_err(dev, "Error reading reg_pmu_bw\n");
+               return ret;
+       }
+
+       /* Ignore the readonly reserved bit. */
+       bw_bits &= ~BMG160_REG_PMU_BW_RES;
+
+       for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
+               if (bmg160_samp_freq_table[i].bw_bits == bw_bits)
+                       break;
+       }
+
+       *val = bmg160_samp_freq_table[i].filter;
+
+       return ret ? ret : IIO_VAL_INT;
+}
+
+
+static int bmg160_set_filter(struct bmg160_data *data, int val)
+{
+       struct device *dev = regmap_get_device(data->regmap);
+       int ret;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
+               if (bmg160_samp_freq_table[i].filter == val)
+                       break;
+       }
+
+       ret = regmap_write(data->regmap, BMG160_REG_PMU_BW,
+                          bmg160_samp_freq_table[i].bw_bits);
+       if (ret < 0) {
+               dev_err(dev, "Error writing reg_pmu_bw\n");
+               return ret;
+       }
 
        return 0;
 }
@@ -386,11 +435,23 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
 
 static int bmg160_get_bw(struct bmg160_data *data, int *val)
 {
+       struct device *dev = regmap_get_device(data->regmap);   
        int i;
+       unsigned int bw_bits;
+       int ret;
+
+       ret = regmap_read(data->regmap, BMG160_REG_PMU_BW, &bw_bits);
+       if (ret < 0) {
+               dev_err(dev, "Error reading reg_pmu_bw\n");
+               return ret;
+       }
+
+       /* Ignore the readonly reserved bit. */
+       bw_bits &= ~BMG160_REG_PMU_BW_RES;
 
        for (i = 0; i < ARRAY_SIZE(bmg160_samp_freq_table); ++i) {
-               if (bmg160_samp_freq_table[i].bw_bits == data->bw_bits) {
-                       *val = bmg160_samp_freq_table[i].val;
+               if (bmg160_samp_freq_table[i].bw_bits == bw_bits) {
+                       *val = bmg160_samp_freq_table[i].odr;
                        return IIO_VAL_INT;
                }
        }
@@ -507,6 +568,8 @@ static int bmg160_read_raw(struct iio_dev *indio_dev,
                        return IIO_VAL_INT;
                } else
                        return -EINVAL;
+       case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+               return bmg160_get_filter(data, val);
        case IIO_CHAN_INFO_SCALE:
                *val = 0;
                switch (chan->type) {
@@ -571,6 +634,26 @@ static int bmg160_write_raw(struct iio_dev *indio_dev,
                ret = bmg160_set_power_state(data, false);
                mutex_unlock(&data->mutex);
                return ret;
+       case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+               if (val2)
+                       return -EINVAL;
+
+               mutex_lock(&data->mutex);
+               ret = bmg160_set_power_state(data, true);
+               if (ret < 0) {
+                       bmg160_set_power_state(data, false);
+                       mutex_unlock(&data->mutex);
+                       return ret;
+               }
+               ret = bmg160_set_filter(data, val);
+               if (ret < 0) {
+                       bmg160_set_power_state(data, false);
+                       mutex_unlock(&data->mutex);
+                       return ret;
+               }
+               ret = bmg160_set_power_state(data, false);
+               mutex_unlock(&data->mutex);
+               return ret;
        case IIO_CHAN_INFO_SCALE:
                if (val)
                        return -EINVAL;
@@ -728,7 +811,8 @@ static const struct iio_event_spec bmg160_event = {
        .channel2 = IIO_MOD_##_axis,                                    \
        .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),                   \
        .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |          \
-                                   BIT(IIO_CHAN_INFO_SAMP_FREQ),       \
+               BIT(IIO_CHAN_INFO_SAMP_FREQ) |                          \
+               BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),       \
        .scan_index = AXIS_##_axis,                                     \
        .scan_type = {                                                  \
                .sign = 's',                                            \
@@ -885,25 +969,25 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
 
        if (val & BMG160_ANY_MOTION_BIT_X)
                iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
-                                                       0,
-                                                       IIO_MOD_X,
-                                                       IIO_EV_TYPE_ROC,
-                                                       dir),
-                                                       iio_get_time_ns());
+                                                            0,
+                                                            IIO_MOD_X,
+                                                            IIO_EV_TYPE_ROC,
+                                                            dir),
+                              iio_get_time_ns(indio_dev));
        if (val & BMG160_ANY_MOTION_BIT_Y)
                iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
-                                                       0,
-                                                       IIO_MOD_Y,
-                                                       IIO_EV_TYPE_ROC,
-                                                       dir),
-                                                       iio_get_time_ns());
+                                                            0,
+                                                            IIO_MOD_Y,
+                                                            IIO_EV_TYPE_ROC,
+                                                            dir),
+                              iio_get_time_ns(indio_dev));
        if (val & BMG160_ANY_MOTION_BIT_Z)
                iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
-                                                       0,
-                                                       IIO_MOD_Z,
-                                                       IIO_EV_TYPE_ROC,
-                                                       dir),
-                                                       iio_get_time_ns());
+                                                            0,
+                                                            IIO_MOD_Z,
+                                                            IIO_EV_TYPE_ROC,
+                                                            dir),
+                              iio_get_time_ns(indio_dev));
 
 ack_intr_status:
        if (!data->dready_trigger_on) {
index a8012955a1f6d47130f703ced0df6a5e6c424407..aea034d8fe0fb42495de198c1465246d0343c8c8 100644 (file)
@@ -426,13 +426,15 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
        indio_dev->info = &gyro_info;
        mutex_init(&gdata->tb.buf_lock);
 
-       st_sensors_power_enable(indio_dev);
+       err = st_sensors_power_enable(indio_dev);
+       if (err)
+               return err;
 
        err = st_sensors_check_device_support(indio_dev,
                                        ARRAY_SIZE(st_gyro_sensors_settings),
                                        st_gyro_sensors_settings);
        if (err < 0)
-               return err;
+               goto st_gyro_power_off;
 
        gdata->num_data_channels = ST_GYRO_NUMBER_DATA_CHANNELS;
        gdata->multiread_bit = gdata->sensor_settings->multi_read_bit;
@@ -446,11 +448,11 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
        err = st_sensors_init_sensor(indio_dev,
                                (struct st_sensors_platform_data *)&gyro_pdata);
        if (err < 0)
-               return err;
+               goto st_gyro_power_off;
 
        err = st_gyro_allocate_ring(indio_dev);
        if (err < 0)
-               return err;
+               goto st_gyro_power_off;
 
        if (irq > 0) {
                err = st_sensors_allocate_trigger(indio_dev,
@@ -473,6 +475,8 @@ st_gyro_device_register_error:
                st_sensors_deallocate_trigger(indio_dev);
 st_gyro_probe_trigger_error:
        st_gyro_deallocate_ring(indio_dev);
+st_gyro_power_off:
+       st_sensors_power_disable(indio_dev);
 
        return err;
 }
index 88e43f87b9268bbba7081e50419fb2bdc4756f1d..9a081465c42f4225d26747a734c5daceea68c842 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * AFE4403 Heart Rate Monitors and Low-Cost Pulse Oximeters
  *
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
  *     Andrew F. Davis <afd@ti.com>
  *
  * This program is free software; you can redistribute it and/or modify
 #define AFE4403_TIAGAIN                        0x20
 #define AFE4403_TIA_AMB_GAIN           0x21
 
-/* AFE4403 GAIN register fields */
-#define AFE4403_TIAGAIN_RES_MASK       GENMASK(2, 0)
-#define AFE4403_TIAGAIN_RES_SHIFT      0
-#define AFE4403_TIAGAIN_CAP_MASK       GENMASK(7, 3)
-#define AFE4403_TIAGAIN_CAP_SHIFT      3
-
-/* AFE4403 LEDCNTRL register fields */
-#define AFE440X_LEDCNTRL_LED1_MASK             GENMASK(15, 8)
-#define AFE440X_LEDCNTRL_LED1_SHIFT            8
-#define AFE440X_LEDCNTRL_LED2_MASK             GENMASK(7, 0)
-#define AFE440X_LEDCNTRL_LED2_SHIFT            0
-#define AFE440X_LEDCNTRL_LED_RANGE_MASK                GENMASK(17, 16)
-#define AFE440X_LEDCNTRL_LED_RANGE_SHIFT       16
-
-/* AFE4403 CONTROL2 register fields */
-#define AFE440X_CONTROL2_PWR_DWN_TX    BIT(2)
-#define AFE440X_CONTROL2_EN_SLOW_DIAG  BIT(8)
-#define AFE440X_CONTROL2_DIAG_OUT_TRI  BIT(10)
-#define AFE440X_CONTROL2_TX_BRDG_MOD   BIT(11)
-#define AFE440X_CONTROL2_TX_REF_MASK   GENMASK(18, 17)
-#define AFE440X_CONTROL2_TX_REF_SHIFT  17
-
-/* AFE4404 NULL fields */
-#define NULL_MASK      0
-#define NULL_SHIFT     0
-
-/* AFE4403 LEDCNTRL values */
-#define AFE440X_LEDCNTRL_RANGE_TX_HALF 0x1
-#define AFE440X_LEDCNTRL_RANGE_TX_FULL 0x2
-#define AFE440X_LEDCNTRL_RANGE_TX_OFF  0x3
-
-/* AFE4403 CONTROL2 values */
-#define AFE440X_CONTROL2_TX_REF_025    0x0
-#define AFE440X_CONTROL2_TX_REF_050    0x1
-#define AFE440X_CONTROL2_TX_REF_100    0x2
-#define AFE440X_CONTROL2_TX_REF_075    0x3
-
-/* AFE4403 CONTROL3 values */
-#define AFE440X_CONTROL3_CLK_DIV_2     0x0
-#define AFE440X_CONTROL3_CLK_DIV_4     0x2
-#define AFE440X_CONTROL3_CLK_DIV_6     0x3
-#define AFE440X_CONTROL3_CLK_DIV_8     0x4
-#define AFE440X_CONTROL3_CLK_DIV_12    0x5
-#define AFE440X_CONTROL3_CLK_DIV_1     0x7
-
-/* AFE4403 TIAGAIN_CAP values */
-#define AFE4403_TIAGAIN_CAP_5_P                0x0
-#define AFE4403_TIAGAIN_CAP_10_P       0x1
-#define AFE4403_TIAGAIN_CAP_20_P       0x2
-#define AFE4403_TIAGAIN_CAP_30_P       0x3
-#define AFE4403_TIAGAIN_CAP_55_P       0x8
-#define AFE4403_TIAGAIN_CAP_155_P      0x10
-
-/* AFE4403 TIAGAIN_RES values */
-#define AFE4403_TIAGAIN_RES_500_K      0x0
-#define AFE4403_TIAGAIN_RES_250_K      0x1
-#define AFE4403_TIAGAIN_RES_100_K      0x2
-#define AFE4403_TIAGAIN_RES_50_K       0x3
-#define AFE4403_TIAGAIN_RES_25_K       0x4
-#define AFE4403_TIAGAIN_RES_10_K       0x5
-#define AFE4403_TIAGAIN_RES_1_M                0x6
-#define AFE4403_TIAGAIN_RES_NONE       0x7
+enum afe4403_fields {
+       /* Gains */
+       F_RF_LED1, F_CF_LED1,
+       F_RF_LED, F_CF_LED,
+
+       /* LED Current */
+       F_ILED1, F_ILED2,
+
+       /* sentinel */
+       F_MAX_FIELDS
+};
+
+static const struct reg_field afe4403_reg_fields[] = {
+       /* Gains */
+       [F_RF_LED1]     = REG_FIELD(AFE4403_TIAGAIN, 0, 2),
+       [F_CF_LED1]     = REG_FIELD(AFE4403_TIAGAIN, 3, 7),
+       [F_RF_LED]      = REG_FIELD(AFE4403_TIA_AMB_GAIN, 0, 2),
+       [F_CF_LED]      = REG_FIELD(AFE4403_TIA_AMB_GAIN, 3, 7),
+       /* LED Current */
+       [F_ILED1]       = REG_FIELD(AFE440X_LEDCNTRL, 0, 7),
+       [F_ILED2]       = REG_FIELD(AFE440X_LEDCNTRL, 8, 15),
+};
 
 /**
- * struct afe4403_data
- * @dev - Device structure
- * @spi - SPI device handle
- * @regmap - Register map of the device
- * @regulator - Pointer to the regulator for the IC
- * @trig - IIO trigger for this device
- * @irq - ADC_RDY line interrupt number
+ * struct afe4403_data - AFE4403 device instance data
+ * @dev: Device structure
+ * @spi: SPI device handle
+ * @regmap: Register map of the device
+ * @fields: Register fields of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
  */
 struct afe4403_data {
        struct device *dev;
        struct spi_device *spi;
        struct regmap *regmap;
+       struct regmap_field *fields[F_MAX_FIELDS];
        struct regulator *regulator;
        struct iio_trigger *trig;
        int irq;
 };
 
 enum afe4403_chan_id {
+       LED2 = 1,
+       ALED2,
        LED1,
        ALED1,
-       LED2,
-       ALED2,
-       LED1_ALED1,
        LED2_ALED2,
-       ILED1,
-       ILED2,
+       LED1_ALED1,
 };
 
-static const struct afe440x_reg_info afe4403_reg_info[] = {
-       [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, 0, NULL),
-       [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, 0, NULL),
-       [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, 0, NULL),
-       [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
-       [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
-       [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
-       [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED1),
-       [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE440X_LEDCNTRL_LED2),
+static const unsigned int afe4403_channel_values[] = {
+       [LED2] = AFE440X_LED2VAL,
+       [ALED2] = AFE440X_ALED2VAL,
+       [LED1] = AFE440X_LED1VAL,
+       [ALED1] = AFE440X_ALED1VAL,
+       [LED2_ALED2] = AFE440X_LED2_ALED2VAL,
+       [LED1_ALED1] = AFE440X_LED1_ALED1VAL,
+};
+
+static const unsigned int afe4403_channel_leds[] = {
+       [LED2] = F_ILED2,
+       [LED1] = F_ILED1,
 };
 
 static const struct iio_chan_spec afe4403_channels[] = {
        /* ADC values */
-       AFE440X_INTENSITY_CHAN(LED1, "led1", 0),
-       AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", 0),
-       AFE440X_INTENSITY_CHAN(LED2, "led2", 0),
-       AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", 0),
-       AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
-       AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+       AFE440X_INTENSITY_CHAN(LED2, 0),
+       AFE440X_INTENSITY_CHAN(ALED2, 0),
+       AFE440X_INTENSITY_CHAN(LED1, 0),
+       AFE440X_INTENSITY_CHAN(ALED1, 0),
+       AFE440X_INTENSITY_CHAN(LED2_ALED2, 0),
+       AFE440X_INTENSITY_CHAN(LED1_ALED1, 0),
        /* LED current */
-       AFE440X_CURRENT_CHAN(ILED1, "led1"),
-       AFE440X_CURRENT_CHAN(ILED2, "led2"),
+       AFE440X_CURRENT_CHAN(LED2),
+       AFE440X_CURRENT_CHAN(LED1),
 };
 
 static const struct afe440x_val_table afe4403_res_table[] = {
        { 500000 }, { 250000 }, { 100000 }, { 50000 },
        { 25000 }, { 10000 }, { 1000000 }, { 0 },
 };
-AFE440X_TABLE_ATTR(tia_resistance_available, afe4403_res_table);
+AFE440X_TABLE_ATTR(in_intensity_resistance_available, afe4403_res_table);
 
 static const struct afe440x_val_table afe4403_cap_table[] = {
        { 0, 5000 }, { 0, 10000 }, { 0, 20000 }, { 0, 25000 },
@@ -171,7 +134,7 @@ static const struct afe440x_val_table afe4403_cap_table[] = {
        { 0, 205000 }, { 0, 210000 }, { 0, 220000 }, { 0, 225000 },
        { 0, 230000 }, { 0, 235000 }, { 0, 245000 }, { 0, 250000 },
 };
-AFE440X_TABLE_ATTR(tia_capacitance_available, afe4403_cap_table);
+AFE440X_TABLE_ATTR(in_intensity_capacitance_available, afe4403_cap_table);
 
 static ssize_t afe440x_show_register(struct device *dev,
                                     struct device_attribute *attr,
@@ -180,38 +143,21 @@ static ssize_t afe440x_show_register(struct device *dev,
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct afe4403_data *afe = iio_priv(indio_dev);
        struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
-       unsigned int reg_val, type;
+       unsigned int reg_val;
        int vals[2];
-       int ret, val_len;
+       int ret;
 
-       ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+       ret = regmap_field_read(afe->fields[afe440x_attr->field], &reg_val);
        if (ret)
                return ret;
 
-       reg_val &= afe440x_attr->mask;
-       reg_val >>= afe440x_attr->shift;
-
-       switch (afe440x_attr->type) {
-       case SIMPLE:
-               type = IIO_VAL_INT;
-               val_len = 1;
-               vals[0] = reg_val;
-               break;
-       case RESISTANCE:
-       case CAPACITANCE:
-               type = IIO_VAL_INT_PLUS_MICRO;
-               val_len = 2;
-               if (reg_val < afe440x_attr->table_size) {
-                       vals[0] = afe440x_attr->val_table[reg_val].integer;
-                       vals[1] = afe440x_attr->val_table[reg_val].fract;
-                       break;
-               }
-               return -EINVAL;
-       default:
+       if (reg_val >= afe440x_attr->table_size)
                return -EINVAL;
-       }
 
-       return iio_format_value(buf, type, val_len, vals);
+       vals[0] = afe440x_attr->val_table[reg_val].integer;
+       vals[1] = afe440x_attr->val_table[reg_val].fract;
+
+       return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals);
 }
 
 static ssize_t afe440x_store_register(struct device *dev,
@@ -227,48 +173,43 @@ static ssize_t afe440x_store_register(struct device *dev,
        if (ret)
                return ret;
 
-       switch (afe440x_attr->type) {
-       case SIMPLE:
-               val = integer;
-               break;
-       case RESISTANCE:
-       case CAPACITANCE:
-               for (val = 0; val < afe440x_attr->table_size; val++)
-                       if (afe440x_attr->val_table[val].integer == integer &&
-                           afe440x_attr->val_table[val].fract == fract)
-                               break;
-               if (val == afe440x_attr->table_size)
-                       return -EINVAL;
-               break;
-       default:
+       for (val = 0; val < afe440x_attr->table_size; val++)
+               if (afe440x_attr->val_table[val].integer == integer &&
+                   afe440x_attr->val_table[val].fract == fract)
+                       break;
+       if (val == afe440x_attr->table_size)
                return -EINVAL;
-       }
 
-       ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
-                                afe440x_attr->mask,
-                                (val << afe440x_attr->shift));
+       ret = regmap_field_write(afe->fields[afe440x_attr->field], val);
        if (ret)
                return ret;
 
        return count;
 }
 
-static AFE440X_ATTR(tia_separate_en, AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+static AFE440X_ATTR(in_intensity1_resistance, F_RF_LED, afe4403_res_table);
+static AFE440X_ATTR(in_intensity1_capacitance, F_CF_LED, afe4403_cap_table);
+
+static AFE440X_ATTR(in_intensity2_resistance, F_RF_LED, afe4403_res_table);
+static AFE440X_ATTR(in_intensity2_capacitance, F_CF_LED, afe4403_cap_table);
 
-static AFE440X_ATTR(tia_resistance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
-static AFE440X_ATTR(tia_capacitance1, AFE4403_TIAGAIN, AFE4403_TIAGAIN_CAP, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+static AFE440X_ATTR(in_intensity3_resistance, F_RF_LED1, afe4403_res_table);
+static AFE440X_ATTR(in_intensity3_capacitance, F_CF_LED1, afe4403_cap_table);
 
-static AFE440X_ATTR(tia_resistance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, RESISTANCE, afe4403_res_table, ARRAY_SIZE(afe4403_res_table));
-static AFE440X_ATTR(tia_capacitance2, AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES, CAPACITANCE, afe4403_cap_table, ARRAY_SIZE(afe4403_cap_table));
+static AFE440X_ATTR(in_intensity4_resistance, F_RF_LED1, afe4403_res_table);
+static AFE440X_ATTR(in_intensity4_capacitance, F_CF_LED1, afe4403_cap_table);
 
 static struct attribute *afe440x_attributes[] = {
-       &afe440x_attr_tia_separate_en.dev_attr.attr,
-       &afe440x_attr_tia_resistance1.dev_attr.attr,
-       &afe440x_attr_tia_capacitance1.dev_attr.attr,
-       &afe440x_attr_tia_resistance2.dev_attr.attr,
-       &afe440x_attr_tia_capacitance2.dev_attr.attr,
-       &dev_attr_tia_resistance_available.attr,
-       &dev_attr_tia_capacitance_available.attr,
+       &dev_attr_in_intensity_resistance_available.attr,
+       &dev_attr_in_intensity_capacitance_available.attr,
+       &afe440x_attr_in_intensity1_resistance.dev_attr.attr,
+       &afe440x_attr_in_intensity1_capacitance.dev_attr.attr,
+       &afe440x_attr_in_intensity2_resistance.dev_attr.attr,
+       &afe440x_attr_in_intensity2_capacitance.dev_attr.attr,
+       &afe440x_attr_in_intensity3_resistance.dev_attr.attr,
+       &afe440x_attr_in_intensity3_capacitance.dev_attr.attr,
+       &afe440x_attr_in_intensity4_resistance.dev_attr.attr,
+       &afe440x_attr_in_intensity4_capacitance.dev_attr.attr,
        NULL
 };
 
@@ -309,35 +250,26 @@ static int afe4403_read_raw(struct iio_dev *indio_dev,
                            int *val, int *val2, long mask)
 {
        struct afe4403_data *afe = iio_priv(indio_dev);
-       const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+       unsigned int reg = afe4403_channel_values[chan->address];
+       unsigned int field = afe4403_channel_leds[chan->address];
        int ret;
 
        switch (chan->type) {
        case IIO_INTENSITY:
                switch (mask) {
                case IIO_CHAN_INFO_RAW:
-                       ret = afe4403_read(afe, reg_info.reg, val);
-                       if (ret)
-                               return ret;
-                       return IIO_VAL_INT;
-               case IIO_CHAN_INFO_OFFSET:
-                       ret = regmap_read(afe->regmap, reg_info.offreg,
-                                         val);
+                       ret = afe4403_read(afe, reg, val);
                        if (ret)
                                return ret;
-                       *val &= reg_info.mask;
-                       *val >>= reg_info.shift;
                        return IIO_VAL_INT;
                }
                break;
        case IIO_CURRENT:
                switch (mask) {
                case IIO_CHAN_INFO_RAW:
-                       ret = regmap_read(afe->regmap, reg_info.reg, val);
+                       ret = regmap_field_read(afe->fields[field], val);
                        if (ret)
                                return ret;
-                       *val &= reg_info.mask;
-                       *val >>= reg_info.shift;
                        return IIO_VAL_INT;
                case IIO_CHAN_INFO_SCALE:
                        *val = 0;
@@ -357,25 +289,13 @@ static int afe4403_write_raw(struct iio_dev *indio_dev,
                             int val, int val2, long mask)
 {
        struct afe4403_data *afe = iio_priv(indio_dev);
-       const struct afe440x_reg_info reg_info = afe4403_reg_info[chan->address];
+       unsigned int field = afe4403_channel_leds[chan->address];
 
        switch (chan->type) {
-       case IIO_INTENSITY:
-               switch (mask) {
-               case IIO_CHAN_INFO_OFFSET:
-                       return regmap_update_bits(afe->regmap,
-                               reg_info.offreg,
-                               reg_info.mask,
-                               (val << reg_info.shift));
-               }
-               break;
        case IIO_CURRENT:
                switch (mask) {
                case IIO_CHAN_INFO_RAW:
-                       return regmap_update_bits(afe->regmap,
-                               reg_info.reg,
-                               reg_info.mask,
-                               (val << reg_info.shift));
+                       return regmap_field_write(afe->fields[field], val);
                }
                break;
        default:
@@ -410,7 +330,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
        for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
                ret = spi_write_then_read(afe->spi,
-                                         &afe4403_reg_info[bit].reg, 1,
+                                         &afe4403_channel_values[bit], 1,
                                          rx, 3);
                if (ret)
                        goto err;
@@ -472,12 +392,8 @@ static const struct iio_trigger_ops afe4403_trigger_ops = {
 
 static const struct reg_sequence afe4403_reg_sequences[] = {
        AFE4403_TIMING_PAIRS,
-       { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN | 0x000007},
-       { AFE4403_TIA_AMB_GAIN, AFE4403_TIAGAIN_RES_1_M },
-       { AFE440X_LEDCNTRL, (0x14 << AFE440X_LEDCNTRL_LED1_SHIFT) |
-                           (0x14 << AFE440X_LEDCNTRL_LED2_SHIFT) },
-       { AFE440X_CONTROL2, AFE440X_CONTROL2_TX_REF_050 <<
-                           AFE440X_CONTROL2_TX_REF_SHIFT },
+       { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN },
+       { AFE4403_TIAGAIN, AFE440X_TIAGAIN_ENSEPGAIN },
 };
 
 static const struct regmap_range afe4403_yes_ranges[] = {
@@ -498,13 +414,11 @@ static const struct regmap_config afe4403_regmap_config = {
        .volatile_table = &afe4403_volatile_table,
 };
 
-#ifdef CONFIG_OF
 static const struct of_device_id afe4403_of_match[] = {
        { .compatible = "ti,afe4403", },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, afe4403_of_match);
-#endif
 
 static int __maybe_unused afe4403_suspend(struct device *dev)
 {
@@ -553,7 +467,7 @@ static int afe4403_probe(struct spi_device *spi)
 {
        struct iio_dev *indio_dev;
        struct afe4403_data *afe;
-       int ret;
+       int i, ret;
 
        indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*afe));
        if (!indio_dev)
@@ -572,6 +486,15 @@ static int afe4403_probe(struct spi_device *spi)
                return PTR_ERR(afe->regmap);
        }
 
+       for (i = 0; i < F_MAX_FIELDS; i++) {
+               afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap,
+                                                        afe4403_reg_fields[i]);
+               if (IS_ERR(afe->fields[i])) {
+                       dev_err(afe->dev, "Unable to allocate regmap fields\n");
+                       return PTR_ERR(afe->fields[i]);
+               }
+       }
+
        afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
        if (IS_ERR(afe->regulator)) {
                dev_err(afe->dev, "Unable to get regulator\n");
@@ -694,7 +617,7 @@ MODULE_DEVICE_TABLE(spi, afe4403_ids);
 static struct spi_driver afe4403_spi_driver = {
        .driver = {
                .name = AFE4403_DRIVER_NAME,
-               .of_match_table = of_match_ptr(afe4403_of_match),
+               .of_match_table = afe4403_of_match,
                .pm = &afe4403_pm_ops,
        },
        .probe = afe4403_probe,
@@ -704,5 +627,5 @@ static struct spi_driver afe4403_spi_driver = {
 module_spi_driver(afe4403_spi_driver);
 
 MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
-MODULE_DESCRIPTION("TI AFE4403 Heart Rate and Pulse Oximeter");
+MODULE_DESCRIPTION("TI AFE4403 Heart Rate Monitor and Pulse Oximeter AFE");
 MODULE_LICENSE("GPL v2");
index 5096a4643784721df32378b3995b91a9198803d3..45266404f7e3b5bd7c7790a0b05ff298fc24d5a9 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * AFE4404 Heart Rate Monitors and Low-Cost Pulse Oximeters
  *
- * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/
  *     Andrew F. Davis <afd@ti.com>
  *
  * This program is free software; you can redistribute it and/or modify
 #define AFE4404_AVG_LED2_ALED2VAL      0x3f
 #define AFE4404_AVG_LED1_ALED1VAL      0x40
 
-/* AFE4404 GAIN register fields */
-#define AFE4404_TIA_GAIN_RES_MASK      GENMASK(2, 0)
-#define AFE4404_TIA_GAIN_RES_SHIFT     0
-#define AFE4404_TIA_GAIN_CAP_MASK      GENMASK(5, 3)
-#define AFE4404_TIA_GAIN_CAP_SHIFT     3
+/* AFE4404 CONTROL2 register fields */
+#define AFE440X_CONTROL2_OSC_ENABLE    BIT(9)
 
-/* AFE4404 LEDCNTRL register fields */
-#define AFE4404_LEDCNTRL_ILED1_MASK    GENMASK(5, 0)
-#define AFE4404_LEDCNTRL_ILED1_SHIFT   0
-#define AFE4404_LEDCNTRL_ILED2_MASK    GENMASK(11, 6)
-#define AFE4404_LEDCNTRL_ILED2_SHIFT   6
-#define AFE4404_LEDCNTRL_ILED3_MASK    GENMASK(17, 12)
-#define AFE4404_LEDCNTRL_ILED3_SHIFT   12
+enum afe4404_fields {
+       /* Gains */
+       F_TIA_GAIN_SEP, F_TIA_CF_SEP,
+       F_TIA_GAIN, TIA_CF,
 
-/* AFE4404 CONTROL2 register fields */
-#define AFE440X_CONTROL2_ILED_2X_MASK  BIT(17)
-#define AFE440X_CONTROL2_ILED_2X_SHIFT 17
-
-/* AFE4404 CONTROL3 register fields */
-#define AFE440X_CONTROL3_OSC_ENABLE    BIT(9)
-
-/* AFE4404 OFFDAC register current fields */
-#define AFE4404_OFFDAC_CURR_LED1_MASK  GENMASK(9, 5)
-#define AFE4404_OFFDAC_CURR_LED1_SHIFT 5
-#define AFE4404_OFFDAC_CURR_LED2_MASK  GENMASK(19, 15)
-#define AFE4404_OFFDAC_CURR_LED2_SHIFT 15
-#define AFE4404_OFFDAC_CURR_LED3_MASK  GENMASK(4, 0)
-#define AFE4404_OFFDAC_CURR_LED3_SHIFT 0
-#define AFE4404_OFFDAC_CURR_ALED1_MASK GENMASK(14, 10)
-#define AFE4404_OFFDAC_CURR_ALED1_SHIFT        10
-#define AFE4404_OFFDAC_CURR_ALED2_MASK GENMASK(4, 0)
-#define AFE4404_OFFDAC_CURR_ALED2_SHIFT        0
-
-/* AFE4404 NULL fields */
-#define NULL_MASK      0
-#define NULL_SHIFT     0
-
-/* AFE4404 TIA_GAIN_CAP values */
-#define AFE4404_TIA_GAIN_CAP_5_P       0x0
-#define AFE4404_TIA_GAIN_CAP_2_5_P     0x1
-#define AFE4404_TIA_GAIN_CAP_10_P      0x2
-#define AFE4404_TIA_GAIN_CAP_7_5_P     0x3
-#define AFE4404_TIA_GAIN_CAP_20_P      0x4
-#define AFE4404_TIA_GAIN_CAP_17_5_P    0x5
-#define AFE4404_TIA_GAIN_CAP_25_P      0x6
-#define AFE4404_TIA_GAIN_CAP_22_5_P    0x7
-
-/* AFE4404 TIA_GAIN_RES values */
-#define AFE4404_TIA_GAIN_RES_500_K     0x0
-#define AFE4404_TIA_GAIN_RES_250_K     0x1
-#define AFE4404_TIA_GAIN_RES_100_K     0x2
-#define AFE4404_TIA_GAIN_RES_50_K      0x3
-#define AFE4404_TIA_GAIN_RES_25_K      0x4
-#define AFE4404_TIA_GAIN_RES_10_K      0x5
-#define AFE4404_TIA_GAIN_RES_1_M       0x6
-#define AFE4404_TIA_GAIN_RES_2_M       0x7
+       /* LED Current */
+       F_ILED1, F_ILED2, F_ILED3,
+
+       /* Offset DAC */
+       F_OFFDAC_AMB2, F_OFFDAC_LED1, F_OFFDAC_AMB1, F_OFFDAC_LED2,
+
+       /* sentinel */
+       F_MAX_FIELDS
+};
+
+static const struct reg_field afe4404_reg_fields[] = {
+       /* Gains */
+       [F_TIA_GAIN_SEP]        = REG_FIELD(AFE4404_TIA_GAIN_SEP, 0, 2),
+       [F_TIA_CF_SEP]          = REG_FIELD(AFE4404_TIA_GAIN_SEP, 3, 5),
+       [F_TIA_GAIN]            = REG_FIELD(AFE4404_TIA_GAIN, 0, 2),
+       [TIA_CF]                = REG_FIELD(AFE4404_TIA_GAIN, 3, 5),
+       /* LED Current */
+       [F_ILED1]               = REG_FIELD(AFE440X_LEDCNTRL, 0, 5),
+       [F_ILED2]               = REG_FIELD(AFE440X_LEDCNTRL, 6, 11),
+       [F_ILED3]               = REG_FIELD(AFE440X_LEDCNTRL, 12, 17),
+       /* Offset DAC */
+       [F_OFFDAC_AMB2]         = REG_FIELD(AFE4404_OFFDAC, 0, 4),
+       [F_OFFDAC_LED1]         = REG_FIELD(AFE4404_OFFDAC, 5, 9),
+       [F_OFFDAC_AMB1]         = REG_FIELD(AFE4404_OFFDAC, 10, 14),
+       [F_OFFDAC_LED2]         = REG_FIELD(AFE4404_OFFDAC, 15, 19),
+};
 
 /**
- * struct afe4404_data
- * @dev - Device structure
- * @regmap - Register map of the device
- * @regulator - Pointer to the regulator for the IC
- * @trig - IIO trigger for this device
- * @irq - ADC_RDY line interrupt number
+ * struct afe4404_data - AFE4404 device instance data
+ * @dev: Device structure
+ * @regmap: Register map of the device
+ * @fields: Register fields of the device
+ * @regulator: Pointer to the regulator for the IC
+ * @trig: IIO trigger for this device
+ * @irq: ADC_RDY line interrupt number
  */
 struct afe4404_data {
        struct device *dev;
        struct regmap *regmap;
+       struct regmap_field *fields[F_MAX_FIELDS];
        struct regulator *regulator;
        struct iio_trigger *trig;
        int irq;
 };
 
 enum afe4404_chan_id {
+       LED2 = 1,
+       ALED2,
        LED1,
        ALED1,
-       LED2,
-       ALED2,
-       LED3,
-       LED1_ALED1,
        LED2_ALED2,
-       ILED1,
-       ILED2,
-       ILED3,
+       LED1_ALED1,
+};
+
+static const unsigned int afe4404_channel_values[] = {
+       [LED2] = AFE440X_LED2VAL,
+       [ALED2] = AFE440X_ALED2VAL,
+       [LED1] = AFE440X_LED1VAL,
+       [ALED1] = AFE440X_ALED1VAL,
+       [LED2_ALED2] = AFE440X_LED2_ALED2VAL,
+       [LED1_ALED1] = AFE440X_LED1_ALED1VAL,
 };
 
-static const struct afe440x_reg_info afe4404_reg_info[] = {
-       [LED1] = AFE440X_REG_INFO(AFE440X_LED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED1),
-       [ALED1] = AFE440X_REG_INFO(AFE440X_ALED1VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED1),
-       [LED2] = AFE440X_REG_INFO(AFE440X_LED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_LED2),
-       [ALED2] = AFE440X_REG_INFO(AFE440X_ALED2VAL, AFE4404_OFFDAC, AFE4404_OFFDAC_CURR_ALED2),
-       [LED3] = AFE440X_REG_INFO(AFE440X_ALED2VAL, 0, NULL),
-       [LED1_ALED1] = AFE440X_REG_INFO(AFE440X_LED1_ALED1VAL, 0, NULL),
-       [LED2_ALED2] = AFE440X_REG_INFO(AFE440X_LED2_ALED2VAL, 0, NULL),
-       [ILED1] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED1),
-       [ILED2] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED2),
-       [ILED3] = AFE440X_REG_INFO(AFE440X_LEDCNTRL, 0, AFE4404_LEDCNTRL_ILED3),
+static const unsigned int afe4404_channel_leds[] = {
+       [LED2] = F_ILED2,
+       [ALED2] = F_ILED3,
+       [LED1] = F_ILED1,
+};
+
+static const unsigned int afe4404_channel_offdacs[] = {
+       [LED2] = F_OFFDAC_LED2,
+       [ALED2] = F_OFFDAC_AMB2,
+       [LED1] = F_OFFDAC_LED1,
+       [ALED1] = F_OFFDAC_AMB1,
 };
 
 static const struct iio_chan_spec afe4404_channels[] = {
        /* ADC values */
-       AFE440X_INTENSITY_CHAN(LED1, "led1", BIT(IIO_CHAN_INFO_OFFSET)),
-       AFE440X_INTENSITY_CHAN(ALED1, "led1_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
-       AFE440X_INTENSITY_CHAN(LED2, "led2", BIT(IIO_CHAN_INFO_OFFSET)),
-       AFE440X_INTENSITY_CHAN(ALED2, "led2_ambient", BIT(IIO_CHAN_INFO_OFFSET)),
-       AFE440X_INTENSITY_CHAN(LED3, "led3", BIT(IIO_CHAN_INFO_OFFSET)),
-       AFE440X_INTENSITY_CHAN(LED1_ALED1, "led1-led1_ambient", 0),
-       AFE440X_INTENSITY_CHAN(LED2_ALED2, "led2-led2_ambient", 0),
+       AFE440X_INTENSITY_CHAN(LED2, BIT(IIO_CHAN_INFO_OFFSET)),
+       AFE440X_INTENSITY_CHAN(ALED2, BIT(IIO_CHAN_INFO_OFFSET)),
+       AFE440X_INTENSITY_CHAN(LED1, BIT(IIO_CHAN_INFO_OFFSET)),
+       AFE440X_INTENSITY_CHAN(ALED1, BIT(IIO_CHAN_INFO_OFFSET)),
+       AFE440X_INTENSITY_CHAN(LED2_ALED2, 0),
+       AFE440X_INTENSITY_CHAN(LED1_ALED1, 0),
        /* LED current */
-       AFE440X_CURRENT_CHAN(ILED1, "led1"),
-       AFE440X_CURRENT_CHAN(ILED2, "led2"),
-       AFE440X_CURRENT_CHAN(ILED3, "led3"),
+       AFE440X_CURRENT_CHAN(LED2),
+       AFE440X_CURRENT_CHAN(ALED2),
+       AFE440X_CURRENT_CHAN(LED1),
 };
 
 static const struct afe440x_val_table afe4404_res_table[] = {
@@ -172,7 +156,7 @@ static const struct afe440x_val_table afe4404_res_table[] = {
        { .integer = 1000000, .fract = 0 },
        { .integer = 2000000, .fract = 0 },
 };
-AFE440X_TABLE_ATTR(tia_resistance_available, afe4404_res_table);
+AFE440X_TABLE_ATTR(in_intensity_resistance_available, afe4404_res_table);
 
 static const struct afe440x_val_table afe4404_cap_table[] = {
        { .integer = 0, .fract = 5000 },
@@ -184,7 +168,7 @@ static const struct afe440x_val_table afe4404_cap_table[] = {
        { .integer = 0, .fract = 25000 },
        { .integer = 0, .fract = 22500 },
 };
-AFE440X_TABLE_ATTR(tia_capacitance_available, afe4404_cap_table);
+AFE440X_TABLE_ATTR(in_intensity_capacitance_available, afe4404_cap_table);
 
 static ssize_t afe440x_show_register(struct device *dev,
                                     struct device_attribute *attr,
@@ -193,38 +177,21 @@ static ssize_t afe440x_show_register(struct device *dev,
        struct iio_dev *indio_dev = dev_to_iio_dev(dev);
        struct afe4404_data *afe = iio_priv(indio_dev);
        struct afe440x_attr *afe440x_attr = to_afe440x_attr(attr);
-       unsigned int reg_val, type;
+       unsigned int reg_val;
        int vals[2];
-       int ret, val_len;
+       int ret;
 
-       ret = regmap_read(afe->regmap, afe440x_attr->reg, &reg_val);
+       ret = regmap_field_read(afe->fields[afe440x_attr->field], &reg_val);
        if (ret)
                return ret;
 
-       reg_val &= afe440x_attr->mask;
-       reg_val >>= afe440x_attr->shift;
-
-       switch (afe440x_attr->type) {
-       case SIMPLE:
-               type = IIO_VAL_INT;
-               val_len = 1;
-               vals[0] = reg_val;
-               break;
-       case RESISTANCE:
-       case CAPACITANCE:
-               type = IIO_VAL_INT_PLUS_MICRO;
-               val_len = 2;
-               if (reg_val < afe440x_attr->table_size) {
-                       vals[0] = afe440x_attr->val_table[reg_val].integer;
-                       vals[1] = afe440x_attr->val_table[reg_val].fract;
-                       break;
-               }
-               return -EINVAL;
-       default:
+       if (reg_val >= afe440x_attr->table_size)
                return -EINVAL;
-       }
 
-       return iio_format_value(buf, type, val_len, vals);
+       vals[0] = afe440x_attr->val_table[reg_val].integer;
+       vals[1] = afe440x_attr->val_table[reg_val].fract;
+
+       return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, vals);
 }
 
 static ssize_t afe440x_store_register(struct device *dev,
@@ -240,48 +207,43 @@ static ssize_t afe440x_store_register(struct device *dev,
        if (ret)
                return ret;
 
-       switch (afe440x_attr->type) {
-       case SIMPLE:
-               val = integer;
-               break;
-       case RESISTANCE:
-       case CAPACITANCE:
-               for (val = 0; val < afe440x_attr->table_size; val++)
-                       if (afe440x_attr->val_table[val].integer == integer &&
-                           afe440x_attr->val_table[val].fract == fract)
-                               break;
-               if (val == afe440x_attr->table_size)
-                       return -EINVAL;
-               break;
-       default:
+       for (val = 0; val < afe440x_attr->table_size; val++)
+               if (afe440x_attr->val_table[val].integer == integer &&
+                   afe440x_attr->val_table[val].fract == fract)
+                       break;
+       if (val == afe440x_attr->table_size)
                return -EINVAL;
-       }
 
-       ret = regmap_update_bits(afe->regmap, afe440x_attr->reg,
-                                afe440x_attr->mask,
-                                (val << afe440x_attr->shift));
+       ret = regmap_field_write(afe->fields[afe440x_attr->field], val);
        if (ret)
                return ret;
 
        return count;
 }
 
-static AFE440X_ATTR(tia_separate_en, AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN, SIMPLE, NULL, 0);
+static AFE440X_ATTR(in_intensity1_resistance, F_TIA_GAIN_SEP, afe4404_res_table);
+static AFE440X_ATTR(in_intensity1_capacitance, F_TIA_CF_SEP, afe4404_cap_table);
+
+static AFE440X_ATTR(in_intensity2_resistance, F_TIA_GAIN_SEP, afe4404_res_table);
+static AFE440X_ATTR(in_intensity2_capacitance, F_TIA_CF_SEP, afe4404_cap_table);
 
-static AFE440X_ATTR(tia_resistance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
-static AFE440X_ATTR(tia_capacitance1, AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+static AFE440X_ATTR(in_intensity3_resistance, F_TIA_GAIN, afe4404_res_table);
+static AFE440X_ATTR(in_intensity3_capacitance, TIA_CF, afe4404_cap_table);
 
-static AFE440X_ATTR(tia_resistance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_RES, RESISTANCE, afe4404_res_table, ARRAY_SIZE(afe4404_res_table));
-static AFE440X_ATTR(tia_capacitance2, AFE4404_TIA_GAIN_SEP, AFE4404_TIA_GAIN_CAP, CAPACITANCE, afe4404_cap_table, ARRAY_SIZE(afe4404_cap_table));
+static AFE440X_ATTR(in_intensity4_resistance, F_TIA_GAIN, afe4404_res_table);
+static AFE440X_ATTR(in_intensity4_capacitance, TIA_CF, afe4404_cap_table);
 
 static struct attribute *afe440x_attributes[] = {
-       &afe440x_attr_tia_separate_en.dev_attr.attr,
-       &afe440x_attr_tia_resistance1.dev_attr.attr,
-       &afe440x_attr_tia_capacitance1.dev_attr.attr,
-       &afe440x_attr_tia_resistance2.dev_attr.attr,
-       &afe440x_attr_tia_capacitance2.dev_attr.attr,
-       &dev_attr_tia_resistance_available.attr,
-       &dev_attr_tia_capacitance_available.attr,
+       &dev_attr_in_intensity_resistance_available.attr,
+       &dev_attr_in_intensity_capacitance_available.attr,
+       &afe440x_attr_in_intensity1_resistance.dev_attr.attr,
+       &afe440x_attr_in_intensity1_capacitance.dev_attr.attr,
+       &afe440x_attr_in_intensity2_resistance.dev_attr.attr,
+       &afe440x_attr_in_intensity2_capacitance.dev_attr.attr,
+       &afe440x_attr_in_intensity3_resistance.dev_attr.attr,
+       &afe440x_attr_in_intensity3_capacitance.dev_attr.attr,
+       &afe440x_attr_in_intensity4_resistance.dev_attr.attr,
+       &afe440x_attr_in_intensity4_capacitance.dev_attr.attr,
        NULL
 };
 
@@ -294,35 +256,32 @@ static int afe4404_read_raw(struct iio_dev *indio_dev,
                            int *val, int *val2, long mask)
 {
        struct afe4404_data *afe = iio_priv(indio_dev);
-       const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+       unsigned int value_reg = afe4404_channel_values[chan->address];
+       unsigned int led_field = afe4404_channel_leds[chan->address];
+       unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
        int ret;
 
        switch (chan->type) {
        case IIO_INTENSITY:
                switch (mask) {
                case IIO_CHAN_INFO_RAW:
-                       ret = regmap_read(afe->regmap, reg_info.reg, val);
+                       ret = regmap_read(afe->regmap, value_reg, val);
                        if (ret)
                                return ret;
                        return IIO_VAL_INT;
                case IIO_CHAN_INFO_OFFSET:
-                       ret = regmap_read(afe->regmap, reg_info.offreg,
-                                         val);
+                       ret = regmap_field_read(afe->fields[offdac_field], val);
                        if (ret)
                                return ret;
-                       *val &= reg_info.mask;
-                       *val >>= reg_info.shift;
                        return IIO_VAL_INT;
                }
                break;
        case IIO_CURRENT:
                switch (mask) {
                case IIO_CHAN_INFO_RAW:
-                       ret = regmap_read(afe->regmap, reg_info.reg, val);
+                       ret = regmap_field_read(afe->fields[led_field], val);
                        if (ret)
                                return ret;
-                       *val &= reg_info.mask;
-                       *val >>= reg_info.shift;
                        return IIO_VAL_INT;
                case IIO_CHAN_INFO_SCALE:
                        *val = 0;
@@ -342,25 +301,20 @@ static int afe4404_write_raw(struct iio_dev *indio_dev,
                             int val, int val2, long mask)
 {
        struct afe4404_data *afe = iio_priv(indio_dev);
-       const struct afe440x_reg_info reg_info = afe4404_reg_info[chan->address];
+       unsigned int led_field = afe4404_channel_leds[chan->address];
+       unsigned int offdac_field = afe4404_channel_offdacs[chan->address];
 
        switch (chan->type) {
        case IIO_INTENSITY:
                switch (mask) {
                case IIO_CHAN_INFO_OFFSET:
-                       return regmap_update_bits(afe->regmap,
-                               reg_info.offreg,
-                               reg_info.mask,
-                               (val << reg_info.shift));
+                       return regmap_field_write(afe->fields[offdac_field], val);
                }
                break;
        case IIO_CURRENT:
                switch (mask) {
                case IIO_CHAN_INFO_RAW:
-                       return regmap_update_bits(afe->regmap,
-                               reg_info.reg,
-                               reg_info.mask,
-                               (val << reg_info.shift));
+                       return regmap_field_write(afe->fields[led_field], val);
                }
                break;
        default:
@@ -387,7 +341,7 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private)
 
        for_each_set_bit(bit, indio_dev->active_scan_mask,
                         indio_dev->masklength) {
-               ret = regmap_read(afe->regmap, afe4404_reg_info[bit].reg,
+               ret = regmap_read(afe->regmap, afe4404_channel_values[bit],
                                  &buffer[i++]);
                if (ret)
                        goto err;
@@ -443,11 +397,8 @@ static const struct iio_trigger_ops afe4404_trigger_ops = {
 static const struct reg_sequence afe4404_reg_sequences[] = {
        AFE4404_TIMING_PAIRS,
        { AFE440X_CONTROL1, AFE440X_CONTROL1_TIMEREN },
-       { AFE4404_TIA_GAIN, AFE4404_TIA_GAIN_RES_50_K },
-       { AFE440X_LEDCNTRL, (0xf << AFE4404_LEDCNTRL_ILED1_SHIFT) |
-                           (0x3 << AFE4404_LEDCNTRL_ILED2_SHIFT) |
-                           (0x3 << AFE4404_LEDCNTRL_ILED3_SHIFT) },
-       { AFE440X_CONTROL2, AFE440X_CONTROL3_OSC_ENABLE },
+       { AFE4404_TIA_GAIN_SEP, AFE440X_TIAGAIN_ENSEPGAIN },
+       { AFE440X_CONTROL2, AFE440X_CONTROL2_OSC_ENABLE },
 };
 
 static const struct regmap_range afe4404_yes_ranges[] = {
@@ -469,13 +420,11 @@ static const struct regmap_config afe4404_regmap_config = {
        .volatile_table = &afe4404_volatile_table,
 };
 
-#ifdef CONFIG_OF
 static const struct of_device_id afe4404_of_match[] = {
        { .compatible = "ti,afe4404", },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, afe4404_of_match);
-#endif
 
 static int __maybe_unused afe4404_suspend(struct device *dev)
 {
@@ -525,7 +474,7 @@ static int afe4404_probe(struct i2c_client *client,
 {
        struct iio_dev *indio_dev;
        struct afe4404_data *afe;
-       int ret;
+       int i, ret;
 
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*afe));
        if (!indio_dev)
@@ -543,6 +492,15 @@ static int afe4404_probe(struct i2c_client *client,
                return PTR_ERR(afe->regmap);
        }
 
+       for (i = 0; i < F_MAX_FIELDS; i++) {
+               afe->fields[i] = devm_regmap_field_alloc(afe->dev, afe->regmap,
+                                                        afe4404_reg_fields[i]);
+               if (IS_ERR(afe->fields[i])) {
+                       dev_err(afe->dev, "Unable to allocate regmap fields\n");
+                       return PTR_ERR(afe->fields[i]);
+               }
+       }
+
        afe->regulator = devm_regulator_get(afe->dev, "tx_sup");
        if (IS_ERR(afe->regulator)) {
                dev_err(afe->dev, "Unable to get regulator\n");
@@ -665,7 +623,7 @@ MODULE_DEVICE_TABLE(i2c, afe4404_ids);
 static struct i2c_driver afe4404_i2c_driver = {
        .driver = {
                .name = AFE4404_DRIVER_NAME,
-               .of_match_table = of_match_ptr(afe4404_of_match),
+               .of_match_table = afe4404_of_match,
                .pm = &afe4404_pm_ops,
        },
        .probe = afe4404_probe,
@@ -675,5 +633,5 @@ static struct i2c_driver afe4404_i2c_driver = {
 module_i2c_driver(afe4404_i2c_driver);
 
 MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
-MODULE_DESCRIPTION("TI AFE4404 Heart Rate and Pulse Oximeter");
+MODULE_DESCRIPTION("TI AFE4404 Heart Rate Monitor and Pulse Oximeter AFE");
 MODULE_LICENSE("GPL v2");
index c671ab78a23ac6109b57fcc19a7925e7563149d1..1a0f247043ca14ab66e73c8fcc2191f57291fbdc 100644 (file)
@@ -71,8 +71,7 @@
 #define AFE440X_CONTROL1_TIMEREN       BIT(8)
 
 /* TIAGAIN register fields */
-#define AFE440X_TIAGAIN_ENSEPGAIN_MASK BIT(15)
-#define AFE440X_TIAGAIN_ENSEPGAIN_SHIFT        15
+#define AFE440X_TIAGAIN_ENSEPGAIN      BIT(15)
 
 /* CONTROL2 register fields */
 #define AFE440X_CONTROL2_PDN_AFE       BIT(0)
 #define AFE440X_CONTROL0_WRITE         0x0
 #define AFE440X_CONTROL0_READ          0x1
 
-struct afe440x_reg_info {
-       unsigned int reg;
-       unsigned int offreg;
-       unsigned int shift;
-       unsigned int mask;
-};
-
-#define AFE440X_REG_INFO(_reg, _offreg, _sm)                   \
-       {                                                       \
-               .reg = _reg,                                    \
-               .offreg = _offreg,                              \
-               .shift = _sm ## _SHIFT,                         \
-               .mask = _sm ## _MASK,                           \
-       }
-
-#define AFE440X_INTENSITY_CHAN(_index, _name, _mask)           \
+#define AFE440X_INTENSITY_CHAN(_index, _mask)                  \
        {                                                       \
                .type = IIO_INTENSITY,                          \
                .channel = _index,                              \
@@ -116,29 +100,23 @@ struct afe440x_reg_info {
                                .storagebits = 32,              \
                                .endianness = IIO_CPU,          \
                },                                              \
-               .extend_name = _name,                           \
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |  \
                        _mask,                                  \
+               .indexed = true,                                \
        }
 
-#define AFE440X_CURRENT_CHAN(_index, _name)                    \
+#define AFE440X_CURRENT_CHAN(_index)                           \
        {                                                       \
                .type = IIO_CURRENT,                            \
                .channel = _index,                              \
                .address = _index,                              \
-               .scan_index = _index,                           \
-               .extend_name = _name,                           \
+               .scan_index = -1,                               \
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |  \
                        BIT(IIO_CHAN_INFO_SCALE),               \
+               .indexed = true,                                \
                .output = true,                                 \
        }
 
-enum afe440x_reg_type {
-       SIMPLE,
-       RESISTANCE,
-       CAPACITANCE,
-};
-
 struct afe440x_val_table {
        int integer;
        int fract;
@@ -164,10 +142,7 @@ static DEVICE_ATTR_RO(_name)
 
 struct afe440x_attr {
        struct device_attribute dev_attr;
-       unsigned int reg;
-       unsigned int shift;
-       unsigned int mask;
-       enum afe440x_reg_type type;
+       unsigned int field;
        const struct afe440x_val_table *val_table;
        unsigned int table_size;
 };
@@ -175,17 +150,14 @@ struct afe440x_attr {
 #define to_afe440x_attr(_dev_attr)                             \
        container_of(_dev_attr, struct afe440x_attr, dev_attr)
 
-#define AFE440X_ATTR(_name, _reg, _field, _type, _table, _size)        \
+#define AFE440X_ATTR(_name, _field, _table)                    \
        struct afe440x_attr afe440x_attr_##_name = {            \
                .dev_attr = __ATTR(_name, (S_IRUGO | S_IWUSR),  \
                                   afe440x_show_register,       \
                                   afe440x_store_register),     \
-               .reg = _reg,                                    \
-               .shift = _field ## _SHIFT,                      \
-               .mask = _field ## _MASK,                        \
-               .type = _type,                                  \
+               .field = _field,                                \
                .val_table = _table,                            \
-               .table_size = _size,                            \
+               .table_size = ARRAY_SIZE(_table),               \
        }
 
 #endif /* _AFE440X_H */
index 11535911a5c697fa359aec0a4626f7a80b3d1e12..3e200f69e88657488a19de758c318f69ec314759 100644 (file)
@@ -276,6 +276,7 @@ static const struct i2c_device_id am2315_i2c_id[] = {
        {"am2315", 0},
        {}
 };
+MODULE_DEVICE_TABLE(i2c, am2315_i2c_id);
 
 static const struct acpi_device_id am2315_acpi_id[] = {
        {"AOS2315", 0},
index 11cbc38b450f77179c7713a795a93fcf58f9ac43..0fbbd8c408945358fbdc58510d539ab24717be13 100644 (file)
@@ -236,6 +236,7 @@ static const struct i2c_device_id htu21_id[] = {
        {"ms8607-humidity", MS8607},
        {}
 };
+MODULE_DEVICE_TABLE(i2c, htu21_id);
 
 static struct i2c_driver htu21_driver = {
        .probe = htu21_probe,
index 359883525ab72b463ca041207a88a60724fadfa6..4c45488e3a7f51d9fec1f3f70e555a24be9d5a18 100644 (file)
@@ -79,4 +79,7 @@ void iio_device_unregister_eventset(struct iio_dev *indio_dev);
 void iio_device_wakeup_eventset(struct iio_dev *indio_dev);
 int iio_event_getfd(struct iio_dev *indio_dev);
 
+struct iio_event_interface;
+bool iio_event_enabled(const struct iio_event_interface *ev_int);
+
 #endif
index b8a290ec984e33f79aaa8b546f543709e579b7b3..e0251b8c1a527aa7fe97ef443c0b1614888e63fc 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/iio/triggered_buffer.h>
 #include <linux/iio/trigger_consumer.h>
 #include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
 
 #include "bmi160.h"
 
@@ -410,7 +411,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
                buf[j++] = sample;
        }
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, buf,
+                                          iio_get_time_ns(indio_dev));
 done:
        iio_trigger_notify_done(indio_dev->trig);
        return IRQ_HANDLED;
@@ -466,10 +468,36 @@ static int bmi160_write_raw(struct iio_dev *indio_dev,
        return 0;
 }
 
+static
+IIO_CONST_ATTR(in_accel_sampling_frequency_available,
+              "0.78125 1.5625 3.125 6.25 12.5 25 50 100 200 400 800 1600");
+static
+IIO_CONST_ATTR(in_anglvel_sampling_frequency_available,
+              "25 50 100 200 400 800 1600 3200");
+static
+IIO_CONST_ATTR(in_accel_scale_available,
+              "0.000598 0.001197 0.002394 0.004788");
+static
+IIO_CONST_ATTR(in_anglvel_scale_available,
+              "0.001065 0.000532 0.000266 0.000133 0.000066");
+
+static struct attribute *bmi160_attrs[] = {
+       &iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
+       &iio_const_attr_in_anglvel_sampling_frequency_available.dev_attr.attr,
+       &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+       &iio_const_attr_in_anglvel_scale_available.dev_attr.attr,
+       NULL,
+};
+
+static const struct attribute_group bmi160_attrs_group = {
+       .attrs = bmi160_attrs,
+};
+
 static const struct iio_info bmi160_info = {
        .driver_module = THIS_MODULE,
        .read_raw = bmi160_read_raw,
        .write_raw = bmi160_write_raw,
+       .attrs = &bmi160_attrs_group,
 };
 
 static const char *bmi160_match_acpi_device(struct device *dev)
index f756feecfa4ca966e381e2d7a79e1de38014ea4e..5483b2ea754dd37c624fbe513b5bf721266cf88b 100644 (file)
@@ -13,8 +13,8 @@ config INV_MPU6050_I2C
        select INV_MPU6050_IIO
        select REGMAP_I2C
        help
-         This driver supports the Invensense MPU6050/6500/9150 motion tracking
-         devices over I2C.
+         This driver supports the Invensense MPU6050/6500/9150 and ICM20608
+         motion tracking devices over I2C.
          This driver can be built as a module. The module will be called
          inv-mpu6050-i2c.
 
@@ -24,7 +24,7 @@ config INV_MPU6050_SPI
        select INV_MPU6050_IIO
        select REGMAP_SPI
        help
-         This driver supports the Invensense MPU6000/6500/9150 motion tracking
-         devices over SPI.
+         This driver supports the Invensense MPU6050/6500/9150 and ICM20608
+         motion tracking devices over SPI.
          This driver can be built as a module. The module will be called
          inv-mpu6050-spi.
index ee40dae5ab58e5f937be5a2b3444c0e8de215168..b9fcbf18aa99e53f4fb2b0ec62949ef983f9b0e0 100644 (file)
@@ -113,6 +113,12 @@ static const struct inv_mpu6050_hw hw_info[] = {
                .reg = &reg_set_6050,
                .config = &chip_config_6050,
        },
+       {
+               .whoami = INV_ICM20608_WHOAMI_VALUE,
+               .name = "ICM20608",
+               .reg = &reg_set_6500,
+               .config = &chip_config_6050,
+       },
 };
 
 int inv_mpu6050_switch_engine(struct inv_mpu6050_state *st, bool en, u32 mask)
index e1fd7fa53e3ba1eda97b78cb862c4a6a20bb8f83..19580d1db597a5bc33ce3f9350900edc0d1aa0af 100644 (file)
@@ -170,6 +170,7 @@ static const struct i2c_device_id inv_mpu_id[] = {
        {"mpu6050", INV_MPU6050},
        {"mpu6500", INV_MPU6500},
        {"mpu9150", INV_MPU9150},
+       {"icm20608", INV_ICM20608},
        {}
 };
 
index 3bf8544ccc9f999f7546317261cd6cad89b9cb97..f0e8c5dd9fae25915e7cf0d4885d5d7b9b0e821d 100644 (file)
@@ -70,6 +70,7 @@ enum inv_devices {
        INV_MPU6500,
        INV_MPU6000,
        INV_MPU9150,
+       INV_ICM20608,
        INV_NUM_PARTS
 };
 
@@ -225,6 +226,7 @@ struct inv_mpu6050_state {
 #define INV_MPU6050_WHOAMI_VALUE               0x68
 #define INV_MPU6500_WHOAMI_VALUE               0x70
 #define INV_MPU9150_WHOAMI_VALUE               0x68
+#define INV_ICM20608_WHOAMI_VALUE              0xAF
 
 /* scan element definition */
 enum inv_mpu6050_scan {
index d0700628ee6df32479d8a53de456f9d593bbe3f1..3a9f3eac91ab6a4d9311139861b52def36719d57 100644 (file)
@@ -107,7 +107,7 @@ irqreturn_t inv_mpu6050_irq_handler(int irq, void *p)
        struct inv_mpu6050_state *st = iio_priv(indio_dev);
        s64 timestamp;
 
-       timestamp = iio_get_time_ns();
+       timestamp = iio_get_time_ns(indio_dev);
        kfifo_in_spinlocked(&st->timestamps, &timestamp, 1,
                            &st->time_stamp_lock);
 
index 190a4a51c830055bfd7ce039162cb20efe210799..6e6476dfa188ed79bdace630f492ce866aacefe9 100644 (file)
@@ -82,6 +82,7 @@ static const struct spi_device_id inv_mpu_id[] = {
        {"mpu6000", INV_MPU6000},
        {"mpu6500", INV_MPU6500},
        {"mpu9150", INV_MPU9150},
+       {"icm20608", INV_ICM20608},
        {}
 };
 
index e6319a9346b2691e855cb5ea04920b8163f3692a..f914d5d140e4014f5a31011c7783959fe8022755 100644 (file)
@@ -80,6 +80,7 @@ static const char * const iio_chan_type_name_spec[] = {
        [IIO_RESISTANCE] = "resistance",
        [IIO_PH] = "ph",
        [IIO_UVINDEX] = "uvindex",
+       [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
 };
 
 static const char * const iio_modifier_names[] = {
@@ -177,6 +178,86 @@ ssize_t iio_read_const_attr(struct device *dev,
 }
 EXPORT_SYMBOL(iio_read_const_attr);
 
+static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
+{
+       int ret;
+       const struct iio_event_interface *ev_int = indio_dev->event_interface;
+
+       ret = mutex_lock_interruptible(&indio_dev->mlock);
+       if (ret)
+               return ret;
+       if ((ev_int && iio_event_enabled(ev_int)) ||
+           iio_buffer_enabled(indio_dev)) {
+               mutex_unlock(&indio_dev->mlock);
+               return -EBUSY;
+       }
+       indio_dev->clock_id = clock_id;
+       mutex_unlock(&indio_dev->mlock);
+
+       return 0;
+}
+
+/**
+ * iio_get_time_ns() - utility function to get a time stamp for events etc
+ * @indio_dev: device
+ */
+s64 iio_get_time_ns(const struct iio_dev *indio_dev)
+{
+       struct timespec tp;
+
+       switch (iio_device_get_clock(indio_dev)) {
+       case CLOCK_REALTIME:
+               ktime_get_real_ts(&tp);
+               break;
+       case CLOCK_MONOTONIC:
+               ktime_get_ts(&tp);
+               break;
+       case CLOCK_MONOTONIC_RAW:
+               getrawmonotonic(&tp);
+               break;
+       case CLOCK_REALTIME_COARSE:
+               tp = current_kernel_time();
+               break;
+       case CLOCK_MONOTONIC_COARSE:
+               tp = get_monotonic_coarse();
+               break;
+       case CLOCK_BOOTTIME:
+               get_monotonic_boottime(&tp);
+               break;
+       case CLOCK_TAI:
+               timekeeping_clocktai(&tp);
+               break;
+       default:
+               BUG();
+       }
+
+       return timespec_to_ns(&tp);
+}
+EXPORT_SYMBOL(iio_get_time_ns);
+
+/**
+ * iio_get_time_res() - utility function to get time stamp clock resolution in
+ *                      nano seconds.
+ * @indio_dev: device
+ */
+unsigned int iio_get_time_res(const struct iio_dev *indio_dev)
+{
+       switch (iio_device_get_clock(indio_dev)) {
+       case CLOCK_REALTIME:
+       case CLOCK_MONOTONIC:
+       case CLOCK_MONOTONIC_RAW:
+       case CLOCK_BOOTTIME:
+       case CLOCK_TAI:
+               return hrtimer_resolution;
+       case CLOCK_REALTIME_COARSE:
+       case CLOCK_MONOTONIC_COARSE:
+               return LOW_RES_NSEC;
+       default:
+               BUG();
+       }
+}
+EXPORT_SYMBOL(iio_get_time_res);
+
 static int __init iio_init(void)
 {
        int ret;
@@ -989,11 +1070,91 @@ static ssize_t iio_show_dev_name(struct device *dev,
 
 static DEVICE_ATTR(name, S_IRUGO, iio_show_dev_name, NULL);
 
+static ssize_t iio_show_timestamp_clock(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+       const clockid_t clk = iio_device_get_clock(indio_dev);
+       const char *name;
+       ssize_t sz;
+
+       switch (clk) {
+       case CLOCK_REALTIME:
+               name = "realtime\n";
+               sz = sizeof("realtime\n");
+               break;
+       case CLOCK_MONOTONIC:
+               name = "monotonic\n";
+               sz = sizeof("monotonic\n");
+               break;
+       case CLOCK_MONOTONIC_RAW:
+               name = "monotonic_raw\n";
+               sz = sizeof("monotonic_raw\n");
+               break;
+       case CLOCK_REALTIME_COARSE:
+               name = "realtime_coarse\n";
+               sz = sizeof("realtime_coarse\n");
+               break;
+       case CLOCK_MONOTONIC_COARSE:
+               name = "monotonic_coarse\n";
+               sz = sizeof("monotonic_coarse\n");
+               break;
+       case CLOCK_BOOTTIME:
+               name = "boottime\n";
+               sz = sizeof("boottime\n");
+               break;
+       case CLOCK_TAI:
+               name = "tai\n";
+               sz = sizeof("tai\n");
+               break;
+       default:
+               BUG();
+       }
+
+       memcpy(buf, name, sz);
+       return sz;
+}
+
+static ssize_t iio_store_timestamp_clock(struct device *dev,
+                                        struct device_attribute *attr,
+                                        const char *buf, size_t len)
+{
+       clockid_t clk;
+       int ret;
+
+       if (sysfs_streq(buf, "realtime"))
+               clk = CLOCK_REALTIME;
+       else if (sysfs_streq(buf, "monotonic"))
+               clk = CLOCK_MONOTONIC;
+       else if (sysfs_streq(buf, "monotonic_raw"))
+               clk = CLOCK_MONOTONIC_RAW;
+       else if (sysfs_streq(buf, "realtime_coarse"))
+               clk = CLOCK_REALTIME_COARSE;
+       else if (sysfs_streq(buf, "monotonic_coarse"))
+               clk = CLOCK_MONOTONIC_COARSE;
+       else if (sysfs_streq(buf, "boottime"))
+               clk = CLOCK_BOOTTIME;
+       else if (sysfs_streq(buf, "tai"))
+               clk = CLOCK_TAI;
+       else
+               return -EINVAL;
+
+       ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static DEVICE_ATTR(current_timestamp_clock, S_IRUGO | S_IWUSR,
+                  iio_show_timestamp_clock, iio_store_timestamp_clock);
+
 static int iio_device_register_sysfs(struct iio_dev *indio_dev)
 {
        int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
        struct iio_dev_attr *p;
-       struct attribute **attr;
+       struct attribute **attr, *clk = NULL;
 
        /* First count elements in any existing group */
        if (indio_dev->info->attrs) {
@@ -1008,16 +1169,25 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
         */
        if (indio_dev->channels)
                for (i = 0; i < indio_dev->num_channels; i++) {
-                       ret = iio_device_add_channel_sysfs(indio_dev,
-                                                          &indio_dev
-                                                          ->channels[i]);
+                       const struct iio_chan_spec *chan =
+                               &indio_dev->channels[i];
+
+                       if (chan->type == IIO_TIMESTAMP)
+                               clk = &dev_attr_current_timestamp_clock.attr;
+
+                       ret = iio_device_add_channel_sysfs(indio_dev, chan);
                        if (ret < 0)
                                goto error_clear_attrs;
                        attrcount += ret;
                }
 
+       if (indio_dev->event_interface)
+               clk = &dev_attr_current_timestamp_clock.attr;
+
        if (indio_dev->name)
                attrcount++;
+       if (clk)
+               attrcount++;
 
        indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
                                                   sizeof(indio_dev->chan_attr_group.attrs[0]),
@@ -1038,6 +1208,8 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
                indio_dev->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
        if (indio_dev->name)
                indio_dev->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
+       if (clk)
+               indio_dev->chan_attr_group.attrs[attrn++] = clk;
 
        indio_dev->groups[indio_dev->groupcounter++] =
                &indio_dev->chan_attr_group;
index cae332b1d7ea595959c53ecb3c8f51eef9ba095b..0ebfc923a9975a39326b38049f93cd3dfd1c0601 100644 (file)
@@ -44,6 +44,11 @@ struct iio_event_interface {
        struct mutex            read_lock;
 };
 
+bool iio_event_enabled(const struct iio_event_interface *ev_int)
+{
+       return !!test_bit(IIO_BUSY_BIT_POS, &ev_int->flags);
+}
+
 /**
  * iio_push_event() - try to add event to the list for userspace reading
  * @indio_dev:         IIO device structure
@@ -60,7 +65,7 @@ int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp)
        int copied;
 
        /* Does anyone care? */
-       if (test_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+       if (iio_event_enabled(ev_int)) {
 
                ev.id = ev_code;
                ev.timestamp = timestamp;
@@ -180,8 +185,14 @@ int iio_event_getfd(struct iio_dev *indio_dev)
        if (ev_int == NULL)
                return -ENODEV;
 
-       if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags))
-               return -EBUSY;
+       fd = mutex_lock_interruptible(&indio_dev->mlock);
+       if (fd)
+               return fd;
+
+       if (test_and_set_bit(IIO_BUSY_BIT_POS, &ev_int->flags)) {
+               fd = -EBUSY;
+               goto unlock;
+       }
 
        iio_device_get(indio_dev);
 
@@ -194,6 +205,8 @@ int iio_event_getfd(struct iio_dev *indio_dev)
                kfifo_reset_out(&ev_int->det_events);
        }
 
+unlock:
+       mutex_unlock(&indio_dev->mlock);
        return fd;
 }
 
diff --git a/drivers/iio/industrialio-sw-device.c b/drivers/iio/industrialio-sw-device.c
new file mode 100644 (file)
index 0000000..81b49cf
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * The Industrial I/O core, software IIO devices functions
+ *
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include <linux/iio/sw_device.h>
+#include <linux/iio/configfs.h>
+#include <linux/configfs.h>
+
+static struct config_group *iio_devices_group;
+static struct config_item_type iio_device_type_group_type;
+
+static struct config_item_type iio_devices_group_type = {
+       .ct_owner = THIS_MODULE,
+};
+
+static LIST_HEAD(iio_device_types_list);
+static DEFINE_MUTEX(iio_device_types_lock);
+
+static
+struct iio_sw_device_type *__iio_find_sw_device_type(const char *name,
+                                                    unsigned len)
+{
+       struct iio_sw_device_type *d = NULL, *iter;
+
+       list_for_each_entry(iter, &iio_device_types_list, list)
+               if (!strcmp(iter->name, name)) {
+                       d = iter;
+                       break;
+               }
+
+       return d;
+}
+
+int iio_register_sw_device_type(struct iio_sw_device_type *d)
+{
+       struct iio_sw_device_type *iter;
+       int ret = 0;
+
+       mutex_lock(&iio_device_types_lock);
+       iter = __iio_find_sw_device_type(d->name, strlen(d->name));
+       if (iter)
+               ret = -EBUSY;
+       else
+               list_add_tail(&d->list, &iio_device_types_list);
+       mutex_unlock(&iio_device_types_lock);
+
+       if (ret)
+               return ret;
+
+       d->group = configfs_register_default_group(iio_devices_group, d->name,
+                                               &iio_device_type_group_type);
+       if (IS_ERR(d->group))
+               ret = PTR_ERR(d->group);
+
+       return ret;
+}
+EXPORT_SYMBOL(iio_register_sw_device_type);
+
+void iio_unregister_sw_device_type(struct iio_sw_device_type *dt)
+{
+       struct iio_sw_device_type *iter;
+
+       mutex_lock(&iio_device_types_lock);
+       iter = __iio_find_sw_device_type(dt->name, strlen(dt->name));
+       if (iter)
+               list_del(&dt->list);
+       mutex_unlock(&iio_device_types_lock);
+
+       configfs_unregister_default_group(dt->group);
+}
+EXPORT_SYMBOL(iio_unregister_sw_device_type);
+
+static
+struct iio_sw_device_type *iio_get_sw_device_type(const char *name)
+{
+       struct iio_sw_device_type *dt;
+
+       mutex_lock(&iio_device_types_lock);
+       dt = __iio_find_sw_device_type(name, strlen(name));
+       if (dt && !try_module_get(dt->owner))
+               dt = NULL;
+       mutex_unlock(&iio_device_types_lock);
+
+       return dt;
+}
+
+struct iio_sw_device *iio_sw_device_create(const char *type, const char *name)
+{
+       struct iio_sw_device *d;
+       struct iio_sw_device_type *dt;
+
+       dt = iio_get_sw_device_type(type);
+       if (!dt) {
+               pr_err("Invalid device type: %s\n", type);
+               return ERR_PTR(-EINVAL);
+       }
+       d = dt->ops->probe(name);
+       if (IS_ERR(d))
+               goto out_module_put;
+
+       d->device_type = dt;
+
+       return d;
+out_module_put:
+       module_put(dt->owner);
+       return d;
+}
+EXPORT_SYMBOL(iio_sw_device_create);
+
+void iio_sw_device_destroy(struct iio_sw_device *d)
+{
+       struct iio_sw_device_type *dt = d->device_type;
+
+       dt->ops->remove(d);
+       module_put(dt->owner);
+}
+EXPORT_SYMBOL(iio_sw_device_destroy);
+
+static struct config_group *device_make_group(struct config_group *group,
+                                             const char *name)
+{
+       struct iio_sw_device *d;
+
+       d = iio_sw_device_create(group->cg_item.ci_name, name);
+       if (IS_ERR(d))
+               return ERR_CAST(d);
+
+       config_item_set_name(&d->group.cg_item, "%s", name);
+
+       return &d->group;
+}
+
+static void device_drop_group(struct config_group *group,
+                             struct config_item *item)
+{
+       struct iio_sw_device *d = to_iio_sw_device(item);
+
+       iio_sw_device_destroy(d);
+       config_item_put(item);
+}
+
+static struct configfs_group_operations device_ops = {
+       .make_group     = &device_make_group,
+       .drop_item      = &device_drop_group,
+};
+
+static struct config_item_type iio_device_type_group_type = {
+       .ct_group_ops = &device_ops,
+       .ct_owner       = THIS_MODULE,
+};
+
+static int __init iio_sw_device_init(void)
+{
+       iio_devices_group =
+               configfs_register_default_group(&iio_configfs_subsys.su_group,
+                                               "devices",
+                                               &iio_devices_group_type);
+       return PTR_ERR_OR_ZERO(iio_devices_group);
+}
+module_init(iio_sw_device_init);
+
+static void __exit iio_sw_device_exit(void)
+{
+       configfs_unregister_default_group(iio_devices_group);
+}
+module_exit(iio_sw_device_exit);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("Industrial I/O software devices support");
+MODULE_LICENSE("GPL v2");
index 0c52dfe649771db4a4a23e7e09365dbbc33932e1..7ad82fdd3e5bfb0e39565f93fb2491962f97e01b 100644 (file)
@@ -64,10 +64,16 @@ static struct attribute *iio_trig_dev_attrs[] = {
 };
 ATTRIBUTE_GROUPS(iio_trig_dev);
 
+static struct iio_trigger *__iio_trigger_find_by_name(const char *name);
+
 int iio_trigger_register(struct iio_trigger *trig_info)
 {
        int ret;
 
+       /* trig_info->ops is required for the module member */
+       if (!trig_info->ops)
+               return -EINVAL;
+
        trig_info->id = ida_simple_get(&iio_trigger_ida, 0, 0, GFP_KERNEL);
        if (trig_info->id < 0)
                return trig_info->id;
@@ -82,11 +88,19 @@ int iio_trigger_register(struct iio_trigger *trig_info)
 
        /* Add to list of available triggers held by the IIO core */
        mutex_lock(&iio_trigger_list_lock);
+       if (__iio_trigger_find_by_name(trig_info->name)) {
+               pr_err("Duplicate trigger name '%s'\n", trig_info->name);
+               ret = -EEXIST;
+               goto error_device_del;
+       }
        list_add_tail(&trig_info->list, &iio_trigger_list);
        mutex_unlock(&iio_trigger_list_lock);
 
        return 0;
 
+error_device_del:
+       mutex_unlock(&iio_trigger_list_lock);
+       device_del(&trig_info->dev);
 error_unregister_id:
        ida_simple_remove(&iio_trigger_ida, trig_info->id);
        return ret;
@@ -105,6 +119,18 @@ void iio_trigger_unregister(struct iio_trigger *trig_info)
 }
 EXPORT_SYMBOL(iio_trigger_unregister);
 
+/* Search for trigger by name, assuming iio_trigger_list_lock held */
+static struct iio_trigger *__iio_trigger_find_by_name(const char *name)
+{
+       struct iio_trigger *iter;
+
+       list_for_each_entry(iter, &iio_trigger_list, list)
+               if (!strcmp(iter->name, name))
+                       return iter;
+
+       return NULL;
+}
+
 static struct iio_trigger *iio_trigger_find_by_name(const char *name,
                                                    size_t len)
 {
@@ -164,8 +190,7 @@ EXPORT_SYMBOL(iio_trigger_poll_chained);
 
 void iio_trigger_notify_done(struct iio_trigger *trig)
 {
-       if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
-               trig->ops->try_reenable)
+       if (atomic_dec_and_test(&trig->use_count) && trig->ops->try_reenable)
                if (trig->ops->try_reenable(trig))
                        /* Missed an interrupt so launch new poll now */
                        iio_trigger_poll(trig);
@@ -224,7 +249,7 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
                goto out_put_irq;
 
        /* Enable trigger in driver */
-       if (trig->ops && trig->ops->set_trigger_state && notinuse) {
+       if (trig->ops->set_trigger_state && notinuse) {
                ret = trig->ops->set_trigger_state(trig, true);
                if (ret < 0)
                        goto out_free_irq;
@@ -249,7 +274,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
                = (bitmap_weight(trig->pool,
                                 CONFIG_IIO_CONSUMERS_PER_TRIGGER)
                   == 1);
-       if (trig->ops && trig->ops->set_trigger_state && no_other_users) {
+       if (trig->ops->set_trigger_state && no_other_users) {
                ret = trig->ops->set_trigger_state(trig, false);
                if (ret)
                        return ret;
@@ -264,7 +289,7 @@ static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
 irqreturn_t iio_pollfunc_store_time(int irq, void *p)
 {
        struct iio_poll_func *pf = p;
-       pf->timestamp = iio_get_time_ns();
+       pf->timestamp = iio_get_time_ns(pf->indio_dev);
        return IRQ_WAKE_THREAD;
 }
 EXPORT_SYMBOL(iio_pollfunc_store_time);
@@ -371,7 +396,7 @@ static ssize_t iio_trigger_write_current(struct device *dev,
                        return ret;
        }
 
-       if (trig && trig->ops && trig->ops->validate_device) {
+       if (trig && trig->ops->validate_device) {
                ret = trig->ops->validate_device(trig, indio_dev);
                if (ret)
                        return ret;
index 53201d99a16c8d760f4ec909c8c5fb1cba6bc8be..f0b47c501f4ecf2dba26ddca6fc60c12bd07cc84 100644 (file)
@@ -118,7 +118,7 @@ static void acpi_als_notify(struct acpi_device *device, u32 event)
        struct iio_dev *indio_dev = acpi_driver_data(device);
        struct acpi_als *als = iio_priv(indio_dev);
        s32 *buffer = als->evt_buffer;
-       s64 time_ns = iio_get_time_ns();
+       s64 time_ns = iio_get_time_ns(indio_dev);
        s32 val;
        int ret;
 
index 09ad5f1ce539a413e65084cac9032819d5b38cbf..0113fc843a8106b83d23b1ad01a81c732feda84e 100644 (file)
@@ -118,7 +118,7 @@ static irqreturn_t adjd_s311_trigger_handler(int irq, void *p)
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct adjd_s311_data *data = iio_priv(indio_dev);
-       s64 time_ns = iio_get_time_ns();
+       s64 time_ns = iio_get_time_ns(indio_dev);
        int i, j = 0;
 
        int ret = adjd_s311_req_data(indio_dev);
index e1b9fa5a7e9153e0923f8c5f9d7df60928cfe875..649b26f678131f0797bb8ecd5e3fd6ef9f42cc03 100644 (file)
@@ -396,7 +396,7 @@ static irqreturn_t apds9300_interrupt_handler(int irq, void *private)
                       IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
                                            IIO_EV_TYPE_THRESH,
                                            IIO_EV_DIR_EITHER),
-                      iio_get_time_ns());
+                      iio_get_time_ns(dev_info));
 
        apds9300_clear_intr(data);
 
index 651d57b8abbf910b3e94ad9e628e312b958cbb50..a4304edc3e0fac6b161daa1342c6b99c967b4225 100644 (file)
@@ -807,7 +807,7 @@ static irqreturn_t apds9960_interrupt_handler(int irq, void *private)
                               IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
                                                    IIO_EV_TYPE_THRESH,
                                                    IIO_EV_DIR_EITHER),
-                              iio_get_time_ns());
+                              iio_get_time_ns(indio_dev));
                regmap_write(data->regmap, APDS9960_REG_CICLEAR, 1);
        }
 
@@ -816,7 +816,7 @@ static irqreturn_t apds9960_interrupt_handler(int irq, void *private)
                               IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
                                                    IIO_EV_TYPE_THRESH,
                                                    IIO_EV_DIR_EITHER),
-                              iio_get_time_ns());
+                              iio_get_time_ns(indio_dev));
                regmap_write(data->regmap, APDS9960_REG_PICLEAR, 1);
        }
 
index c8d7b5ea7e78c880b7bfc1cc1ff829c8c4c4b8aa..9d66e89c57ef6d6aba8bd78c26f5b55d979441e5 100644 (file)
@@ -268,7 +268,7 @@ static irqreturn_t cm36651_irq_handler(int irq, void *data)
                                CM36651_CMD_READ_RAW_PROXIMITY,
                                IIO_EV_TYPE_THRESH, ev_dir);
 
-       iio_push_event(indio_dev, ev_code, iio_get_time_ns());
+       iio_push_event(indio_dev, ev_code, iio_get_time_ns(indio_dev));
 
        return IRQ_HANDLED;
 }
index 6d41086f7c6462108ebcce2dd2dc7dc76ea8d519..6ada9149f1422b644a7340a770fa7f1537595a99 100644 (file)
@@ -851,7 +851,7 @@ static irqreturn_t gp2ap020a00f_prox_sensing_handler(int irq, void *data)
                                    GP2AP020A00F_SCAN_MODE_PROXIMITY,
                                    IIO_EV_TYPE_ROC,
                                    IIO_EV_DIR_RISING),
-                              iio_get_time_ns());
+                              iio_get_time_ns(indio_dev));
                } else {
                        iio_push_event(indio_dev,
                               IIO_UNMOD_EVENT_CODE(
@@ -859,7 +859,7 @@ static irqreturn_t gp2ap020a00f_prox_sensing_handler(int irq, void *data)
                                    GP2AP020A00F_SCAN_MODE_PROXIMITY,
                                    IIO_EV_TYPE_ROC,
                                    IIO_EV_DIR_FALLING),
-                              iio_get_time_ns());
+                              iio_get_time_ns(indio_dev));
                }
        }
 
@@ -925,7 +925,7 @@ static irqreturn_t gp2ap020a00f_thresh_event_handler(int irq, void *data)
                                            IIO_MOD_LIGHT_CLEAR,
                                            IIO_EV_TYPE_THRESH,
                                            IIO_EV_DIR_RISING),
-                                      iio_get_time_ns());
+                                      iio_get_time_ns(indio_dev));
                }
 
                if (test_bit(GP2AP020A00F_FLAG_ALS_FALLING_EV, &priv->flags)) {
@@ -939,7 +939,7 @@ static irqreturn_t gp2ap020a00f_thresh_event_handler(int irq, void *data)
                                            IIO_MOD_LIGHT_CLEAR,
                                            IIO_EV_TYPE_THRESH,
                                            IIO_EV_DIR_FALLING),
-                                      iio_get_time_ns());
+                                      iio_get_time_ns(indio_dev));
                }
        }
 
@@ -1287,22 +1287,14 @@ static int gp2ap020a00f_read_raw(struct iio_dev *indio_dev,
        struct gp2ap020a00f_data *data = iio_priv(indio_dev);
        int err = -EINVAL;
 
-       mutex_lock(&data->lock);
-
-       switch (mask) {
-       case IIO_CHAN_INFO_RAW:
-               if (iio_buffer_enabled(indio_dev)) {
-                       err = -EBUSY;
-                       goto error_unlock;
-               }
+       if (mask == IIO_CHAN_INFO_RAW) {
+               err = iio_device_claim_direct_mode(indio_dev);
+               if (err)
+                       return err;
 
                err = gp2ap020a00f_read_channel(data, chan, val);
-               break;
+               iio_device_release_direct_mode(indio_dev);
        }
-
-error_unlock:
-       mutex_unlock(&data->lock);
-
        return err < 0 ? err : IIO_VAL_INT;
 }
 
index e2945a20e5f621a7740300b0aa1e48ca732ba16f..1d2c0c8a1d4f063dfa2db4a125889c23d8acc8ee 100644 (file)
 #define ISL29125_MODE_B 0x3
 #define ISL29125_MODE_RGB 0x5
 
+#define ISL29125_SENSING_RANGE_0 5722   /* 375 lux full range */
+#define ISL29125_SENSING_RANGE_1 152590 /* 10k lux full range */
+
 #define ISL29125_MODE_RANGE BIT(3)
 
 #define ISL29125_STATUS_CONV BIT(1)
 
 struct isl29125_data {
        struct i2c_client *client;
-       struct mutex lock;
        u8 conf1;
        u16 buffer[8]; /* 3x 16-bit, padding, 8 bytes timestamp */
 };
@@ -128,11 +130,11 @@ static int isl29125_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
-               if (iio_buffer_enabled(indio_dev))
-                       return -EBUSY;
-               mutex_lock(&data->lock);
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
                ret = isl29125_read_data(data, chan->scan_index);
-               mutex_unlock(&data->lock);
+               iio_device_release_direct_mode(indio_dev);
                if (ret < 0)
                        return ret;
                *val = ret;
@@ -140,9 +142,9 @@ static int isl29125_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_SCALE:
                *val = 0;
                if (data->conf1 & ISL29125_MODE_RANGE)
-                       *val2 = 152590; /* 10k lux full range */
+                       *val2 = ISL29125_SENSING_RANGE_1; /*10k lux full range*/
                else
-                       *val2 = 5722; /* 375 lux full range */
+                       *val2 = ISL29125_SENSING_RANGE_0; /*375 lux full range*/
                return IIO_VAL_INT_PLUS_MICRO;
        }
        return -EINVAL;
@@ -158,9 +160,9 @@ static int isl29125_write_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_SCALE:
                if (val != 0)
                        return -EINVAL;
-               if (val2 == 152590)
+               if (val2 == ISL29125_SENSING_RANGE_1)
                        data->conf1 |= ISL29125_MODE_RANGE;
-               else if (val2 == 5722)
+               else if (val2 == ISL29125_SENSING_RANGE_0)
                        data->conf1 &= ~ISL29125_MODE_RANGE;
                else
                        return -EINVAL;
@@ -189,7 +191,7 @@ static irqreturn_t isl29125_trigger_handler(int irq, void *p)
        }
 
        iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
-               iio_get_time_ns());
+               iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
@@ -259,7 +261,6 @@ static int isl29125_probe(struct i2c_client *client,
        data = iio_priv(indio_dev);
        i2c_set_clientdata(client, indio_dev);
        data->client = client;
-       mutex_init(&data->lock);
 
        indio_dev->dev.parent = &client->dev;
        indio_dev->info = &isl29125_info;
index 99a62816c3b439559b5145c70dacf96585cb8c76..e8a8931b4f50d0a2c1db1959a05b6722b2621d25 100644 (file)
@@ -325,9 +325,6 @@ static int jsa1212_probe(struct i2c_client *client,
        struct regmap *regmap;
        int ret;
 
-       if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
-               return -EOPNOTSUPP;
-
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
        if (!indio_dev)
                return -ENOMEM;
index e56937c40a189a8f076440c9e2f361755a81cbc8..f409c2047c050104437ca0387bccb5c401c6a602 100644 (file)
@@ -267,7 +267,7 @@ static irqreturn_t lm3533_als_isr(int irq, void *dev_id)
                                            0,
                                            IIO_EV_TYPE_THRESH,
                                            IIO_EV_DIR_EITHER),
-                      iio_get_time_ns());
+                      iio_get_time_ns(indio_dev));
 out:
        return IRQ_HANDLED;
 }
index 6bf89d8f374191cee48f258d1c25810b0e5dc410..3afc53a3d0b607cd0c02ae27e5fb3b7e1a8f3a40 100644 (file)
@@ -1256,7 +1256,8 @@ static irqreturn_t ltr501_trigger_handler(int irq, void *p)
                buf[j++] = psdata & LTR501_PS_DATA_MASK;
        }
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, buf,
+                                          iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
@@ -1282,14 +1283,14 @@ static irqreturn_t ltr501_interrupt_handler(int irq, void *private)
                               IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
                                                    IIO_EV_TYPE_THRESH,
                                                    IIO_EV_DIR_EITHER),
-                              iio_get_time_ns());
+                              iio_get_time_ns(indio_dev));
 
        if (status & LTR501_STATUS_PS_INTR)
                iio_push_event(indio_dev,
                               IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
                                                    IIO_EV_TYPE_THRESH,
                                                    IIO_EV_DIR_EITHER),
-                              iio_get_time_ns());
+                              iio_get_time_ns(indio_dev));
 
        return IRQ_HANDLED;
 }
index f17cb2ea18f59c9ea4f368e26488e50d114b05ec..6511b20a2a2966b7db3987e46dae98774f4d5540 100644 (file)
@@ -511,7 +511,8 @@ static irqreturn_t max44000_trigger_handler(int irq, void *p)
        }
        mutex_unlock(&data->lock);
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, buf,
+                                          iio_get_time_ns(indio_dev));
        iio_trigger_notify_done(indio_dev->trig);
        return IRQ_HANDLED;
 
index b776c8ed4387dc289eef2b5810b4217346c8f10f..78c9b3a6453ae57286923a42d9284774b2a696c0 100644 (file)
@@ -713,13 +713,13 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
                                        IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
                                                        IIO_EV_TYPE_THRESH,
                                                        IIO_EV_DIR_RISING),
-                                       iio_get_time_ns());
+                                       iio_get_time_ns(iio));
                if (ret & OPT3001_CONFIGURATION_FL)
                        iio_push_event(iio,
                                        IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
                                                        IIO_EV_TYPE_THRESH,
                                                        IIO_EV_DIR_FALLING),
-                                       iio_get_time_ns());
+                                       iio_get_time_ns(iio));
        } else if (ret & OPT3001_CONFIGURATION_CRF) {
                ret = i2c_smbus_read_word_swapped(opt->client, OPT3001_RESULT);
                if (ret < 0) {
index 9e847f8f4f0cd8b04bbc498f77625d46501a47ec..45cf8b0a43637863456240d6c6bfce9e42db9790 100644 (file)
@@ -528,7 +528,7 @@ static irqreturn_t stk3310_irq_handler(int irq, void *private)
        struct iio_dev *indio_dev = private;
        struct stk3310_data *data = iio_priv(indio_dev);
 
-       data->timestamp = iio_get_time_ns();
+       data->timestamp = iio_get_time_ns(indio_dev);
 
        return IRQ_WAKE_THREAD;
 }
index f90f8c5919fee96447a681ef2475208a9572dea8..a795afb7667bdeb7ec23987a64aaa897a88ed890 100644 (file)
@@ -53,7 +53,6 @@
 
 struct tcs3414_data {
        struct i2c_client *client;
-       struct mutex lock;
        u8 control;
        u8 gain;
        u8 timing;
@@ -134,16 +133,16 @@ static int tcs3414_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
-               if (iio_buffer_enabled(indio_dev))
-                       return -EBUSY;
-               mutex_lock(&data->lock);
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
                ret = tcs3414_req_data(data);
                if (ret < 0) {
-                       mutex_unlock(&data->lock);
+                       iio_device_release_direct_mode(indio_dev);
                        return ret;
                }
                ret = i2c_smbus_read_word_data(data->client, chan->address);
-               mutex_unlock(&data->lock);
+               iio_device_release_direct_mode(indio_dev);
                if (ret < 0)
                        return ret;
                *val = ret;
@@ -217,7 +216,7 @@ static irqreturn_t tcs3414_trigger_handler(int irq, void *p)
        }
 
        iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
-               iio_get_time_ns());
+               iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
@@ -288,7 +287,6 @@ static int tcs3414_probe(struct i2c_client *client,
        data = iio_priv(indio_dev);
        i2c_set_clientdata(client, indio_dev);
        data->client = client;
-       mutex_init(&data->lock);
 
        indio_dev->dev.parent = &client->dev;
        indio_dev->info = &tcs3414_info;
index 1b530bf04c8984d30feb70938b56772c72b16b98..3aa71e34ae28b39350033616805eb6f2d5480714 100644 (file)
@@ -52,7 +52,6 @@
 
 struct tcs3472_data {
        struct i2c_client *client;
-       struct mutex lock;
        u8 enable;
        u8 control;
        u8 atime;
@@ -117,17 +116,16 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
-               if (iio_buffer_enabled(indio_dev))
-                       return -EBUSY;
-
-               mutex_lock(&data->lock);
+               ret = iio_device_claim_direct_mode(indio_dev);
+               if (ret)
+                       return ret;
                ret = tcs3472_req_data(data);
                if (ret < 0) {
-                       mutex_unlock(&data->lock);
+                       iio_device_release_direct_mode(indio_dev);
                        return ret;
                }
                ret = i2c_smbus_read_word_data(data->client, chan->address);
-               mutex_unlock(&data->lock);
+               iio_device_release_direct_mode(indio_dev);
                if (ret < 0)
                        return ret;
                *val = ret;
@@ -204,7 +202,7 @@ static irqreturn_t tcs3472_trigger_handler(int irq, void *p)
        }
 
        iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
-               iio_get_time_ns());
+               iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
@@ -263,7 +261,6 @@ static int tcs3472_probe(struct i2c_client *client,
        data = iio_priv(indio_dev);
        i2c_set_clientdata(client, indio_dev);
        data->client = client;
-       mutex_init(&data->lock);
 
        indio_dev->dev.parent = &client->dev;
        indio_dev->info = &tcs3472_info;
index 57b108c30e9801165658a9fef26d3b0e44586b94..04598ae993d44913a822e61f74901fb194076a01 100644 (file)
@@ -630,7 +630,7 @@ static irqreturn_t tsl2563_event_handler(int irq, void *private)
                                            0,
                                            IIO_EV_TYPE_THRESH,
                                            IIO_EV_DIR_EITHER),
-                      iio_get_time_ns());
+                      iio_get_time_ns(dev_info));
 
        /* clear the interrupt and push the event */
        i2c_smbus_write_byte(chip->client, TSL2563_CMD | TSL2563_CLEARINT);
index 45bc2f742f46af4c3485da83f3920fc2cabaff37..20c40f7809643342756db86060b98ea6536053e9 100644 (file)
@@ -833,7 +833,7 @@ static irqreturn_t us5182d_irq_thread_handler(int irq, void *private)
        dir = ret & US5182D_CFG0_PROX ? IIO_EV_DIR_RISING : IIO_EV_DIR_FALLING;
        ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 1, IIO_EV_TYPE_THRESH, dir);
 
-       iio_push_event(indio_dev, ev, iio_get_time_ns());
+       iio_push_event(indio_dev, ev, iio_get_time_ns(indio_dev));
 
        ret = i2c_smbus_write_byte_data(data->client, US5182D_REG_CFG0,
                                        ret & ~US5182D_CFG0_PX_IRQ);
index 84e6559ccc65d3df05c6e9c96c5fb7a2419cc711..1f842abcb4a4db4ff8b9b24159819f2f9a241274 100644 (file)
@@ -44,6 +44,7 @@ config BMC150_MAGN_I2C
          This driver is only implementing magnetometer part, which has
          its own address and register map.
 
+         This driver also supports I2C Bosch BMC156 and BMM150 chips.
          To compile this driver as a module, choose M here: the module will be
          called bmc150_magn_i2c.
 
@@ -60,6 +61,7 @@ config BMC150_MAGN_SPI
          This driver is only implementing magnetometer part, which has
          its own address and register map.
 
+         This driver also supports SPI Bosch BMC156 and BMM150 chips.
          To compile this driver as a module, choose M here: the module will be
          called bmc150_magn_spi.
 
index 609a2c401b5dcf27e80ad76dbc23d2c4cad99ebf..af8606cc78123c6843633757a2aed19e3c4cf72d 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/of_gpio.h>
 #include <linux/acpi.h>
 #include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
 
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
@@ -379,37 +380,40 @@ struct ak8975_data {
        u8                      cntl_cache;
        struct iio_mount_matrix orientation;
        struct regulator        *vdd;
+       struct regulator        *vid;
 };
 
 /* Enable attached power regulator if any. */
-static int ak8975_power_on(struct i2c_client *client)
+static int ak8975_power_on(const struct ak8975_data *data)
 {
-       const struct iio_dev *indio_dev = i2c_get_clientdata(client);
-       struct ak8975_data *data = iio_priv(indio_dev);
        int ret;
 
-       data->vdd = devm_regulator_get(&client->dev, "vdd");
-       if (IS_ERR_OR_NULL(data->vdd)) {
-               ret = PTR_ERR(data->vdd);
-               if (ret == -ENODEV)
-                       ret = 0;
-       } else {
-               ret = regulator_enable(data->vdd);
+       ret = regulator_enable(data->vdd);
+       if (ret) {
+               dev_warn(&data->client->dev,
+                        "Failed to enable specified Vdd supply\n");
+               return ret;
        }
-
-       if (ret)
-               dev_err(&client->dev, "failed to enable Vdd supply: %d\n", ret);
-       return ret;
+       ret = regulator_enable(data->vid);
+       if (ret) {
+               dev_warn(&data->client->dev,
+                        "Failed to enable specified Vid supply\n");
+               return ret;
+       }
+       /*
+        * According to the datasheet the power supply rise time is 200us
+        * and the minimum wait time before mode setting is 100us, in
+        * total 300 us. Add some margin and say minimum 500us here.
+        */
+       usleep_range(500, 1000);
+       return 0;
 }
 
 /* Disable attached power regulator if any. */
-static void ak8975_power_off(const struct i2c_client *client)
+static void ak8975_power_off(const struct ak8975_data *data)
 {
-       const struct iio_dev *indio_dev = i2c_get_clientdata(client);
-       const struct ak8975_data *data = iio_priv(indio_dev);
-
-       if (!IS_ERR_OR_NULL(data->vdd))
-               regulator_disable(data->vdd);
+       regulator_disable(data->vid);
+       regulator_disable(data->vdd);
 }
 
 /*
@@ -430,8 +434,8 @@ static int ak8975_who_i_am(struct i2c_client *client,
         * AK8975   |  DEVICE_ID |  NA
         * AK8963   |  DEVICE_ID |  NA
         */
-       ret = i2c_smbus_read_i2c_block_data(client, AK09912_REG_WIA1,
-                                           2, wia_val);
+       ret = i2c_smbus_read_i2c_block_data_or_emulated(
+                       client, AK09912_REG_WIA1, 2, wia_val);
        if (ret < 0) {
                dev_err(&client->dev, "Error reading WIA\n");
                return ret;
@@ -543,9 +547,9 @@ static int ak8975_setup(struct i2c_client *client)
        }
 
        /* Get asa data and store in the device data. */
-       ret = i2c_smbus_read_i2c_block_data(client,
-                                           data->def->ctrl_regs[ASA_BASE],
-                                           3, data->asa);
+       ret = i2c_smbus_read_i2c_block_data_or_emulated(
+                       client, data->def->ctrl_regs[ASA_BASE],
+                       3, data->asa);
        if (ret < 0) {
                dev_err(&client->dev, "Not able to read asa data\n");
                return ret;
@@ -686,22 +690,31 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
        struct ak8975_data *data = iio_priv(indio_dev);
        const struct i2c_client *client = data->client;
        const struct ak_def *def = data->def;
+       u16 buff;
        int ret;
 
+       pm_runtime_get_sync(&data->client->dev);
+
        mutex_lock(&data->lock);
 
        ret = ak8975_start_read_axis(data, client);
        if (ret)
                goto exit;
 
-       ret = i2c_smbus_read_word_data(client, def->data_regs[index]);
+       ret = i2c_smbus_read_i2c_block_data_or_emulated(
+                       client, def->data_regs[index],
+                       sizeof(buff), (u8*)&buff);
        if (ret < 0)
                goto exit;
 
        mutex_unlock(&data->lock);
 
-       /* Clamp to valid range. */
-       *val = clamp_t(s16, ret, -def->range, def->range);
+       pm_runtime_mark_last_busy(&data->client->dev);
+       pm_runtime_put_autosuspend(&data->client->dev);
+
+       /* Swap bytes and convert to valid range. */
+       buff = le16_to_cpu(buff);
+       *val = clamp_t(s16, buff, -def->range, def->range);
        return IIO_VAL_INT;
 
 exit:
@@ -825,7 +838,8 @@ static void ak8975_fill_buffer(struct iio_dev *indio_dev)
        buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range);
        buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range);
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buff, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, buff,
+                                          iio_get_time_ns(indio_dev));
        return;
 
 unlock:
@@ -919,7 +933,15 @@ static int ak8975_probe(struct i2c_client *client,
 
        data->def = &ak_def_array[chipset];
 
-       err = ak8975_power_on(client);
+       /* Fetch the regulators */
+       data->vdd = devm_regulator_get(&client->dev, "vdd");
+       if (IS_ERR(data->vdd))
+               return PTR_ERR(data->vdd);
+       data->vid = devm_regulator_get(&client->dev, "vid");
+       if (IS_ERR(data->vid))
+               return PTR_ERR(data->vid);
+
+       err = ak8975_power_on(data);
        if (err)
                return err;
 
@@ -959,26 +981,93 @@ static int ak8975_probe(struct i2c_client *client,
                goto cleanup_buffer;
        }
 
+       /* Enable runtime PM */
+       pm_runtime_get_noresume(&client->dev);
+       pm_runtime_set_active(&client->dev);
+       pm_runtime_enable(&client->dev);
+       /*
+        * The device comes online in 500us, so add two orders of magnitude
+        * of delay before autosuspending: 50 ms.
+        */
+       pm_runtime_set_autosuspend_delay(&client->dev, 50);
+       pm_runtime_use_autosuspend(&client->dev);
+       pm_runtime_put(&client->dev);
+
        return 0;
 
 cleanup_buffer:
        iio_triggered_buffer_cleanup(indio_dev);
 power_off:
-       ak8975_power_off(client);
+       ak8975_power_off(data);
        return err;
 }
 
 static int ak8975_remove(struct i2c_client *client)
 {
        struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct ak8975_data *data = iio_priv(indio_dev);
 
+       pm_runtime_get_sync(&client->dev);
+       pm_runtime_put_noidle(&client->dev);
+       pm_runtime_disable(&client->dev);
        iio_device_unregister(indio_dev);
        iio_triggered_buffer_cleanup(indio_dev);
-       ak8975_power_off(client);
+       ak8975_set_mode(data, POWER_DOWN);
+       ak8975_power_off(data);
 
        return 0;
 }
 
+#ifdef CONFIG_PM
+static int ak8975_runtime_suspend(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct ak8975_data *data = iio_priv(indio_dev);
+       int ret;
+
+       /* Set the device in power down if it wasn't already */
+       ret = ak8975_set_mode(data, POWER_DOWN);
+       if (ret < 0) {
+               dev_err(&client->dev, "Error in setting power-down mode\n");
+               return ret;
+       }
+       /* Next cut the regulators */
+       ak8975_power_off(data);
+
+       return 0;
+}
+
+static int ak8975_runtime_resume(struct device *dev)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct ak8975_data *data = iio_priv(indio_dev);
+       int ret;
+
+       /* Take up the regulators */
+       ak8975_power_on(data);
+       /*
+        * We come up in powered down mode; the reading routines will
+        * put us in the mode to read values later.
+        */
+       ret = ak8975_set_mode(data, POWER_DOWN);
+       if (ret < 0) {
+               dev_err(&client->dev, "Error in setting power-down mode\n");
+               return ret;
+       }
+
+       return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops ak8975_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(ak8975_runtime_suspend,
+                          ak8975_runtime_resume, NULL)
+};
+
 static const struct i2c_device_id ak8975_id[] = {
        {"ak8975", AK8975},
        {"ak8963", AK8963},
@@ -1006,6 +1095,7 @@ MODULE_DEVICE_TABLE(of, ak8975_of_match);
 static struct i2c_driver ak8975_driver = {
        .driver = {
                .name   = "ak8975",
+               .pm = &ak8975_dev_pm_ops,
                .of_match_table = of_match_ptr(ak8975_of_match),
                .acpi_match_table = ACPI_PTR(ak_acpi_match),
        },
index eddc7f0d0096c114775be9e22dfd161baed25f1c..ee05722587aa53c81803230d7471967c7ba5115e 100644 (file)
@@ -2,6 +2,7 @@
  * 3-axis magnetometer driver supporting following I2C Bosch-Sensortec chips:
  *  - BMC150
  *  - BMC156
+ *  - BMM150
  *
  * Copyright (c) 2016, Intel Corporation.
  *
@@ -49,6 +50,7 @@ static int bmc150_magn_i2c_remove(struct i2c_client *client)
 static const struct acpi_device_id bmc150_magn_acpi_match[] = {
        {"BMC150B", 0},
        {"BMC156B", 0},
+       {"BMM150B", 0},
        {},
 };
 MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
@@ -56,6 +58,7 @@ MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
 static const struct i2c_device_id bmc150_magn_i2c_id[] = {
        {"bmc150_magn", 0},
        {"bmc156_magn", 0},
+       {"bmm150_magn", 0},
        {}
 };
 MODULE_DEVICE_TABLE(i2c, bmc150_magn_i2c_id);
index c4c738a0769537c6fa2591b902bad5b1cf02cbfd..7d4152d4d01e310605a0f283417248d8d3c52d84 100644 (file)
@@ -2,6 +2,7 @@
  * 3-axis magnetometer driver support following SPI Bosch-Sensortec chips:
  *  - BMC150
  *  - BMC156
+ *  - BMM150
  *
  * Copyright (c) 2016, Intel Corporation.
  *
@@ -41,6 +42,7 @@ static int bmc150_magn_spi_remove(struct spi_device *spi)
 static const struct spi_device_id bmc150_magn_spi_id[] = {
        {"bmc150_magn", 0},
        {"bmc156_magn", 0},
+       {"bmm150_magn", 0},
        {}
 };
 MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id);
@@ -48,6 +50,7 @@ MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id);
 static const struct acpi_device_id bmc150_magn_acpi_match[] = {
        {"BMC150B", 0},
        {"BMC156B", 0},
+       {"BMM150B", 0},
        {},
 };
 MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
index 77882b466e0f481b8c0d15180134b60a0afbebec..ba3e2a374ee5f51e9a3528fbfddb65aeccc1e7b6 100644 (file)
@@ -451,7 +451,7 @@ static irqreturn_t hmc5843_trigger_handler(int irq, void *p)
                goto done;
 
        iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
-                                          iio_get_time_ns());
+                                          iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
index 261d517428e475d49b89d9d8567666abe42750a0..f2be4a04905665426a198bfda9628beaa365d7cb 100644 (file)
@@ -261,7 +261,7 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
        }
 
        iio_push_to_buffers_with_timestamp(indio_dev, buffer,
-               iio_get_time_ns());
+               iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
index 8250fc322c56754aa23ea4c877ff3414b1f58d81..3e1f06b2224cb40e59cdcf8c99be1ef7c98403b1 100644 (file)
@@ -589,13 +589,15 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
        indio_dev->info = &magn_info;
        mutex_init(&mdata->tb.buf_lock);
 
-       st_sensors_power_enable(indio_dev);
+       err = st_sensors_power_enable(indio_dev);
+       if (err)
+               return err;
 
        err = st_sensors_check_device_support(indio_dev,
                                        ARRAY_SIZE(st_magn_sensors_settings),
                                        st_magn_sensors_settings);
        if (err < 0)
-               return err;
+               goto st_magn_power_off;
 
        mdata->num_data_channels = ST_MAGN_NUMBER_DATA_CHANNELS;
        mdata->multiread_bit = mdata->sensor_settings->multi_read_bit;
@@ -608,11 +610,11 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
 
        err = st_sensors_init_sensor(indio_dev, NULL);
        if (err < 0)
-               return err;
+               goto st_magn_power_off;
 
        err = st_magn_allocate_ring(indio_dev);
        if (err < 0)
-               return err;
+               goto st_magn_power_off;
 
        if (irq > 0) {
                err = st_sensors_allocate_trigger(indio_dev,
@@ -635,6 +637,8 @@ st_magn_device_register_error:
                st_sensors_deallocate_trigger(indio_dev);
 st_magn_probe_trigger_error:
        st_magn_deallocate_ring(indio_dev);
+st_magn_power_off:
+       st_sensors_power_disable(indio_dev);
 
        return err;
 }
index 6acb23810bb46e4ba645bddf06e3f600d0f87685..2e9da1cf3297fc1da5fc3e62ab5432e60b6c80be 100644 (file)
@@ -10,11 +10,22 @@ config DS1803
        depends on I2C
        help
          Say yes here to build support for the Maxim Integrated DS1803
-         digital potentiomenter chip.
+         digital potentiometer chip.
 
          To compile this driver as a module, choose M here: the
          module will be called ds1803.
 
+config MAX5487
+        tristate "Maxim MAX5487/MAX5488/MAX5489 Digital Potentiometer driver"
+        depends on SPI
+        help
+          Say yes here to build support for the Maxim
+          MAX5487, MAX5488, MAX5489 digital potentiometer
+          chips.
+
+          To compile this driver as a module, choose M here: the
+          module will be called max5487.
+
 config MCP4131
        tristate "Microchip MCP413X/414X/415X/416X/423X/424X/425X/426X Digital Potentiometer driver"
        depends on SPI
@@ -28,7 +39,7 @@ config MCP4131
          MCP4241, MCP4242,
          MCP4251, MCP4252,
          MCP4261, MCP4262,
-         digital potentiomenter chips.
+         digital potentiometer chips.
 
          To compile this driver as a module, choose M here: the
          module will be called mcp4131.
@@ -38,9 +49,11 @@ config MCP4531
        depends on I2C
        help
          Say yes here to build support for the Microchip
-         MCP4531, MCP4532, MCP4551, MCP4552,
-         MCP4631, MCP4632, MCP4651, MCP4652
-         digital potentiomenter chips.
+         MCP4531, MCP4532, MCP4541, MCP4542,
+         MCP4551, MCP4552, MCP4561, MCP4562,
+         MCP4631, MCP4632, MCP4641, MCP4642,
+         MCP4651, MCP4652, MCP4661, MCP4662
+         digital potentiometer chips.
 
          To compile this driver as a module, choose M here: the
          module will be called mcp4531.
index 6007faa2fb0279dc2a338aa985acf6b3a09abb25..8adb58f38c0b8498f5e5638943f1cb8de3685334 100644 (file)
@@ -4,6 +4,7 @@
 
 # When adding new entries keep the list in alphabetical order
 obj-$(CONFIG_DS1803) += ds1803.o
+obj-$(CONFIG_MAX5487) += max5487.o
 obj-$(CONFIG_MCP4131) += mcp4131.o
 obj-$(CONFIG_MCP4531) += mcp4531.o
 obj-$(CONFIG_TPL0102) += tpl0102.o
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
new file mode 100644 (file)
index 0000000..6c50939
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * max5487.c - Support for MAX5487, MAX5488, MAX5489 digital potentiometers
+ *
+ * Copyright (C) 2016 Cristina-Gabriela Moraru <cristina.moraru09@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+
+#include <linux/iio/sysfs.h>
+#include <linux/iio/iio.h>
+
+#define MAX5487_WRITE_WIPER_A  (0x01 << 8)
+#define MAX5487_WRITE_WIPER_B  (0x02 << 8)
+
+/* copy both wiper regs to NV regs */
+#define MAX5487_COPY_AB_TO_NV  (0x23 << 8)
+/* copy both NV regs to wiper regs */
+#define MAX5487_COPY_NV_TO_AB  (0x33 << 8)
+
+#define MAX5487_MAX_POS                255
+
+struct max5487_data {
+       struct spi_device *spi;
+       int kohms;
+};
+
+#define MAX5487_CHANNEL(ch, addr) {                            \
+       .type = IIO_RESISTANCE,                                 \
+       .indexed = 1,                                           \
+       .output = 1,                                            \
+       .channel = ch,                                          \
+       .address = addr,                                        \
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),           \
+       .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),   \
+}
+
+static const struct iio_chan_spec max5487_channels[] = {
+       MAX5487_CHANNEL(0, MAX5487_WRITE_WIPER_A),
+       MAX5487_CHANNEL(1, MAX5487_WRITE_WIPER_B),
+};
+
+static int max5487_write_cmd(struct spi_device *spi, u16 cmd)
+{
+       return spi_write(spi, (const void *) &cmd, sizeof(u16));
+}
+
+static int max5487_read_raw(struct iio_dev *indio_dev,
+                           struct iio_chan_spec const *chan,
+                           int *val, int *val2, long mask)
+{
+       struct max5487_data *data = iio_priv(indio_dev);
+
+       if (mask != IIO_CHAN_INFO_SCALE)
+               return -EINVAL;
+
+       *val = 1000 * data->kohms;
+       *val2 = MAX5487_MAX_POS;
+
+       return IIO_VAL_FRACTIONAL;
+}
+
+static int max5487_write_raw(struct iio_dev *indio_dev,
+                            struct iio_chan_spec const *chan,
+                            int val, int val2, long mask)
+{
+       struct max5487_data *data = iio_priv(indio_dev);
+
+       if (mask != IIO_CHAN_INFO_RAW)
+               return -EINVAL;
+
+       if (val < 0 || val > MAX5487_MAX_POS)
+               return -EINVAL;
+
+       return max5487_write_cmd(data->spi, chan->address | val);
+}
+
+static const struct iio_info max5487_info = {
+       .read_raw = max5487_read_raw,
+       .write_raw = max5487_write_raw,
+       .driver_module = THIS_MODULE,
+};
+
+static int max5487_spi_probe(struct spi_device *spi)
+{
+       struct iio_dev *indio_dev;
+       struct max5487_data *data;
+       const struct spi_device_id *id = spi_get_device_id(spi);
+       int ret;
+
+       indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*data));
+       if (!indio_dev)
+               return -ENOMEM;
+
+       dev_set_drvdata(&spi->dev, indio_dev);
+       data = iio_priv(indio_dev);
+
+       data->spi = spi;
+       data->kohms = id->driver_data;
+
+       indio_dev->info = &max5487_info;
+       indio_dev->name = id->name;
+       indio_dev->dev.parent = &spi->dev;
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->channels = max5487_channels;
+       indio_dev->num_channels = ARRAY_SIZE(max5487_channels);
+
+       /* restore both wiper regs from NV regs */
+       ret = max5487_write_cmd(data->spi, MAX5487_COPY_NV_TO_AB);
+       if (ret < 0)
+               return ret;
+
+       return iio_device_register(indio_dev);
+}
+
+static int max5487_spi_remove(struct spi_device *spi)
+{
+       struct iio_dev *indio_dev = dev_get_drvdata(&spi->dev);
+
+       iio_device_unregister(indio_dev);
+
+       /* save both wiper regs to NV regs */
+       return max5487_write_cmd(spi, MAX5487_COPY_AB_TO_NV);
+}
+
+static const struct spi_device_id max5487_id[] = {
+       { "MAX5487", 10 },
+       { "MAX5488", 50 },
+       { "MAX5489", 100 },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, max5487_id);
+
+static const struct acpi_device_id max5487_acpi_match[] = {
+       { "MAX5487", 10 },
+       { "MAX5488", 50 },
+       { "MAX5489", 100 },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, max5487_acpi_match);
+
+static struct spi_driver max5487_driver = {
+       .driver = {
+               .name = "max5487",
+               .owner = THIS_MODULE,
+               .acpi_match_table = ACPI_PTR(max5487_acpi_match),
+       },
+       .id_table = max5487_id,
+       .probe = max5487_spi_probe,
+       .remove = max5487_spi_remove
+};
+module_spi_driver(max5487_driver);
+
+MODULE_AUTHOR("Cristina-Gabriela Moraru <cristina.moraru09@gmail.com>");
+MODULE_DESCRIPTION("max5487 SPI driver");
+MODULE_LICENSE("GPL v2");
index 3b72e1a595db8634d6b94baf36aebcb29ec37ef6..13b6ae2fcf7b2cda84553ec95372cfc38020fcfc 100644 (file)
@@ -8,12 +8,20 @@
  * DEVID       #Wipers #Positions      Resistor Opts (kOhm)    i2c address
  * mcp4531     1       129             5, 10, 50, 100          010111x
  * mcp4532     1       129             5, 10, 50, 100          01011xx
+ * mcp4541     1       129             5, 10, 50, 100          010111x
+ * mcp4542     1       129             5, 10, 50, 100          01011xx
  * mcp4551     1       257             5, 10, 50, 100          010111x
  * mcp4552     1       257             5, 10, 50, 100          01011xx
+ * mcp4561     1       257             5, 10, 50, 100          010111x
+ * mcp4562     1       257             5, 10, 50, 100          01011xx
  * mcp4631     2       129             5, 10, 50, 100          0101xxx
  * mcp4632     2       129             5, 10, 50, 100          01011xx
+ * mcp4641     2       129             5, 10, 50, 100          0101xxx
+ * mcp4642     2       129             5, 10, 50, 100          01011xx
  * mcp4651     2       257             5, 10, 50, 100          0101xxx
  * mcp4652     2       257             5, 10, 50, 100          01011xx
+ * mcp4661     2       257             5, 10, 50, 100          0101xxx
+ * mcp4662     2       257             5, 10, 50, 100          01011xx
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
@@ -23,6 +31,8 @@
 #include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include <linux/iio/iio.h>
 
@@ -37,18 +47,34 @@ enum mcp4531_type {
        MCP453x_103,
        MCP453x_503,
        MCP453x_104,
+       MCP454x_502,
+       MCP454x_103,
+       MCP454x_503,
+       MCP454x_104,
        MCP455x_502,
        MCP455x_103,
        MCP455x_503,
        MCP455x_104,
+       MCP456x_502,
+       MCP456x_103,
+       MCP456x_503,
+       MCP456x_104,
        MCP463x_502,
        MCP463x_103,
        MCP463x_503,
        MCP463x_104,
+       MCP464x_502,
+       MCP464x_103,
+       MCP464x_503,
+       MCP464x_104,
        MCP465x_502,
        MCP465x_103,
        MCP465x_503,
        MCP465x_104,
+       MCP466x_502,
+       MCP466x_103,
+       MCP466x_503,
+       MCP466x_104,
 };
 
 static const struct mcp4531_cfg mcp4531_cfg[] = {
@@ -56,18 +82,34 @@ static const struct mcp4531_cfg mcp4531_cfg[] = {
        [MCP453x_103] = { .wipers = 1, .max_pos = 128, .kohms =  10, },
        [MCP453x_503] = { .wipers = 1, .max_pos = 128, .kohms =  50, },
        [MCP453x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
+       [MCP454x_502] = { .wipers = 1, .max_pos = 128, .kohms =   5, },
+       [MCP454x_103] = { .wipers = 1, .max_pos = 128, .kohms =  10, },
+       [MCP454x_503] = { .wipers = 1, .max_pos = 128, .kohms =  50, },
+       [MCP454x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
        [MCP455x_502] = { .wipers = 1, .max_pos = 256, .kohms =   5, },
        [MCP455x_103] = { .wipers = 1, .max_pos = 256, .kohms =  10, },
        [MCP455x_503] = { .wipers = 1, .max_pos = 256, .kohms =  50, },
        [MCP455x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
+       [MCP456x_502] = { .wipers = 1, .max_pos = 256, .kohms =   5, },
+       [MCP456x_103] = { .wipers = 1, .max_pos = 256, .kohms =  10, },
+       [MCP456x_503] = { .wipers = 1, .max_pos = 256, .kohms =  50, },
+       [MCP456x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
        [MCP463x_502] = { .wipers = 2, .max_pos = 128, .kohms =   5, },
        [MCP463x_103] = { .wipers = 2, .max_pos = 128, .kohms =  10, },
        [MCP463x_503] = { .wipers = 2, .max_pos = 128, .kohms =  50, },
        [MCP463x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
+       [MCP464x_502] = { .wipers = 2, .max_pos = 128, .kohms =   5, },
+       [MCP464x_103] = { .wipers = 2, .max_pos = 128, .kohms =  10, },
+       [MCP464x_503] = { .wipers = 2, .max_pos = 128, .kohms =  50, },
+       [MCP464x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
        [MCP465x_502] = { .wipers = 2, .max_pos = 256, .kohms =   5, },
        [MCP465x_103] = { .wipers = 2, .max_pos = 256, .kohms =  10, },
        [MCP465x_503] = { .wipers = 2, .max_pos = 256, .kohms =  50, },
        [MCP465x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+       [MCP466x_502] = { .wipers = 2, .max_pos = 256, .kohms =   5, },
+       [MCP466x_103] = { .wipers = 2, .max_pos = 256, .kohms =  10, },
+       [MCP466x_503] = { .wipers = 2, .max_pos = 256, .kohms =  50, },
+       [MCP466x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
 };
 
 #define MCP4531_WRITE (0 << 2)
@@ -148,12 +190,89 @@ static const struct iio_info mcp4531_info = {
        .driver_module = THIS_MODULE,
 };
 
+#ifdef CONFIG_OF
+
+#define MCP4531_COMPATIBLE(of_compatible, cfg) {       \
+                       .compatible = of_compatible,    \
+                       .data = &mcp4531_cfg[cfg],      \
+}
+
+static const struct of_device_id mcp4531_of_match[] = {
+       MCP4531_COMPATIBLE("microchip,mcp4531-502", MCP453x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4531-103", MCP453x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4531-503", MCP453x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4531-104", MCP453x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4532-502", MCP453x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4532-103", MCP453x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4532-503", MCP453x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4532-104", MCP453x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4541-502", MCP454x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4541-103", MCP454x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4541-503", MCP454x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4541-104", MCP454x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4542-502", MCP454x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4542-103", MCP454x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4542-503", MCP454x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4542-104", MCP454x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4551-502", MCP455x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4551-103", MCP455x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4551-503", MCP455x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4551-104", MCP455x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4552-502", MCP455x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4552-103", MCP455x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4552-503", MCP455x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4552-104", MCP455x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4561-502", MCP456x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4561-103", MCP456x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4561-503", MCP456x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4561-104", MCP456x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4562-502", MCP456x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4562-103", MCP456x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4562-503", MCP456x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4562-104", MCP456x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4631-502", MCP463x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4631-103", MCP463x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4631-503", MCP463x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4631-104", MCP463x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4632-502", MCP463x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4632-103", MCP463x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4632-503", MCP463x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4632-104", MCP463x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4641-502", MCP464x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4641-103", MCP464x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4641-503", MCP464x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4641-104", MCP464x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4642-502", MCP464x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4642-103", MCP464x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4642-503", MCP464x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4642-104", MCP464x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4651-502", MCP465x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4651-103", MCP465x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4651-503", MCP465x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4651-104", MCP465x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4652-502", MCP465x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4652-103", MCP465x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4652-503", MCP465x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4652-104", MCP465x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4661-502", MCP466x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4661-103", MCP466x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4661-503", MCP466x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4661-104", MCP466x_104),
+       MCP4531_COMPATIBLE("microchip,mcp4662-502", MCP466x_502),
+       MCP4531_COMPATIBLE("microchip,mcp4662-103", MCP466x_103),
+       MCP4531_COMPATIBLE("microchip,mcp4662-503", MCP466x_503),
+       MCP4531_COMPATIBLE("microchip,mcp4662-104", MCP466x_104),
+       { /* sentinel */ }
+};
+#endif
+
 static int mcp4531_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        struct device *dev = &client->dev;
        struct mcp4531_data *data;
        struct iio_dev *indio_dev;
+       const struct of_device_id *match;
 
        if (!i2c_check_functionality(client->adapter,
                                     I2C_FUNC_SMBUS_WORD_DATA)) {
@@ -167,7 +286,12 @@ static int mcp4531_probe(struct i2c_client *client,
        data = iio_priv(indio_dev);
        i2c_set_clientdata(client, indio_dev);
        data->client = client;
-       data->cfg = &mcp4531_cfg[id->driver_data];
+
+       match = of_match_device(of_match_ptr(mcp4531_of_match), dev);
+       if (match)
+               data->cfg = of_device_get_match_data(dev);
+       else
+               data->cfg = &mcp4531_cfg[id->driver_data];
 
        indio_dev->dev.parent = dev;
        indio_dev->info = &mcp4531_info;
@@ -187,6 +311,14 @@ static const struct i2c_device_id mcp4531_id[] = {
        { "mcp4532-103", MCP453x_103 },
        { "mcp4532-503", MCP453x_503 },
        { "mcp4532-104", MCP453x_104 },
+       { "mcp4541-502", MCP454x_502 },
+       { "mcp4541-103", MCP454x_103 },
+       { "mcp4541-503", MCP454x_503 },
+       { "mcp4541-104", MCP454x_104 },
+       { "mcp4542-502", MCP454x_502 },
+       { "mcp4542-103", MCP454x_103 },
+       { "mcp4542-503", MCP454x_503 },
+       { "mcp4542-104", MCP454x_104 },
        { "mcp4551-502", MCP455x_502 },
        { "mcp4551-103", MCP455x_103 },
        { "mcp4551-503", MCP455x_503 },
@@ -195,6 +327,14 @@ static const struct i2c_device_id mcp4531_id[] = {
        { "mcp4552-103", MCP455x_103 },
        { "mcp4552-503", MCP455x_503 },
        { "mcp4552-104", MCP455x_104 },
+       { "mcp4561-502", MCP456x_502 },
+       { "mcp4561-103", MCP456x_103 },
+       { "mcp4561-503", MCP456x_503 },
+       { "mcp4561-104", MCP456x_104 },
+       { "mcp4562-502", MCP456x_502 },
+       { "mcp4562-103", MCP456x_103 },
+       { "mcp4562-503", MCP456x_503 },
+       { "mcp4562-104", MCP456x_104 },
        { "mcp4631-502", MCP463x_502 },
        { "mcp4631-103", MCP463x_103 },
        { "mcp4631-503", MCP463x_503 },
@@ -203,6 +343,14 @@ static const struct i2c_device_id mcp4531_id[] = {
        { "mcp4632-103", MCP463x_103 },
        { "mcp4632-503", MCP463x_503 },
        { "mcp4632-104", MCP463x_104 },
+       { "mcp4641-502", MCP464x_502 },
+       { "mcp4641-103", MCP464x_103 },
+       { "mcp4641-503", MCP464x_503 },
+       { "mcp4641-104", MCP464x_104 },
+       { "mcp4642-502", MCP464x_502 },
+       { "mcp4642-103", MCP464x_103 },
+       { "mcp4642-503", MCP464x_503 },
+       { "mcp4642-104", MCP464x_104 },
        { "mcp4651-502", MCP465x_502 },
        { "mcp4651-103", MCP465x_103 },
        { "mcp4651-503", MCP465x_503 },
@@ -211,6 +359,14 @@ static const struct i2c_device_id mcp4531_id[] = {
        { "mcp4652-103", MCP465x_103 },
        { "mcp4652-503", MCP465x_503 },
        { "mcp4652-104", MCP465x_104 },
+       { "mcp4661-502", MCP466x_502 },
+       { "mcp4661-103", MCP466x_103 },
+       { "mcp4661-503", MCP466x_503 },
+       { "mcp4661-104", MCP466x_104 },
+       { "mcp4662-502", MCP466x_502 },
+       { "mcp4662-103", MCP466x_103 },
+       { "mcp4662-503", MCP466x_503 },
+       { "mcp4662-104", MCP466x_104 },
        {}
 };
 MODULE_DEVICE_TABLE(i2c, mcp4531_id);
@@ -218,6 +374,7 @@ MODULE_DEVICE_TABLE(i2c, mcp4531_id);
 static struct i2c_driver mcp4531_driver = {
        .driver = {
                .name   = "mcp4531",
+               .of_match_table = of_match_ptr(mcp4531_of_match),
        },
        .probe          = mcp4531_probe,
        .id_table       = mcp4531_id,
index 5c304d42d7139594c2437257c77516d212535a72..7b6b54531ea235bf315f876d7f51577417604139 100644 (file)
@@ -116,10 +116,6 @@ static int tpl0102_probe(struct i2c_client *client,
        struct tpl0102_data *data;
        struct iio_dev *indio_dev;
 
-       if (!i2c_check_functionality(client->adapter,
-                                    I2C_FUNC_SMBUS_WORD_DATA))
-               return -EOPNOTSUPP;
-
        indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
        if (!indio_dev)
                return -ENOMEM;
index cda9f128f3a4ec66736250121d374683d3a713bb..d130cdc78f4344a885b1fac599a1330beb4ee3eb 100644 (file)
@@ -6,16 +6,33 @@
 menu "Pressure sensors"
 
 config BMP280
-       tristate "Bosch Sensortec BMP180 and BMP280 pressure sensor driver"
-       depends on I2C
+       tristate "Bosch Sensortec BMP180/BMP280 pressure sensor I2C driver"
+       depends on (I2C || SPI_MASTER)
        depends on !(BMP085_I2C=y || BMP085_I2C=m)
-       select REGMAP_I2C
+       depends on !(BMP085_SPI=y || BMP085_SPI=m)
+       select REGMAP
+       select BMP280_I2C if (I2C)
+       select BMP280_SPI if (SPI_MASTER)
        help
          Say yes here to build support for Bosch Sensortec BMP180 and BMP280
-         pressure and temperature sensors.
+         pressure and temperature sensors. Also supports the BE280 with
+         an additional humidity sensor channel.
 
-         To compile this driver as a module, choose M here: the module
-         will be called bmp280.
+         To compile this driver as a module, choose M here: the core module
+         will be called bmp280 and you will also get bmp280-i2c for I2C
+         and/or bmp280-spi for SPI support.
+
+config BMP280_I2C
+       tristate
+       depends on BMP280
+       depends on I2C
+       select REGMAP_I2C
+
+config BMP280_SPI
+       tristate
+       depends on BMP280
+       depends on SPI_MASTER
+       select REGMAP
 
 config HID_SENSOR_PRESS
        depends on HID_SENSOR_HUB
@@ -130,7 +147,7 @@ config IIO_ST_PRESS
        select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
        help
          Say yes here to build support for STMicroelectronics pressure
-         sensors: LPS001WP, LPS25H, LPS331AP.
+         sensors: LPS001WP, LPS25H, LPS331AP, LPS22HB.
 
          This driver can also be built as a module. If so, these modules
          will be created:
index 17d6e7afa1ff2d207c07fa3a3b5437b707de222f..7f395bed5e88d87df6cb018443efee0e6802887d 100644 (file)
@@ -4,6 +4,9 @@
 
 # When adding new entries keep the list in alphabetical order
 obj-$(CONFIG_BMP280) += bmp280.o
+bmp280-objs := bmp280-core.o bmp280-regmap.o
+obj-$(CONFIG_BMP280_I2C) += bmp280-i2c.o
+obj-$(CONFIG_BMP280_SPI) += bmp280-spi.o
 obj-$(CONFIG_HID_SENSOR_PRESS)   += hid-sensor-press.o
 obj-$(CONFIG_HP03) += hp03.o
 obj-$(CONFIG_MPL115) += mpl115.o
similarity index 58%
rename from drivers/iio/pressure/bmp280.c
rename to drivers/iio/pressure/bmp280-core.c
index 724452d61846c67155120876af55921f56e54c6a..6943688e66dfbebc5edae2959f4f43e558786c0a 100644 (file)
@@ -1,5 +1,9 @@
 /*
+ * Copyright (c) 2010 Christoph Mair <christoph.mair@gmail.com>
+ * Copyright (c) 2012 Bosch Sensortec GmbH
+ * Copyright (c) 2012 Unixphere AB
  * Copyright (c) 2014 Intel Corporation
+ * Copyright (c) 2016 Linus Walleij <linus.walleij@linaro.org>
  *
  * Driver for Bosch Sensortec BMP180 and BMP280 digital pressure sensor.
  *
  * Datasheet:
  * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP180-DS000-121.pdf
  * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP280-DS001-12.pdf
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BME280_DS001-11.pdf
  */
 
 #define pr_fmt(fmt) "bmp280: " fmt
 
+#include <linux/device.h>
 #include <linux/module.h>
-#include <linux/i2c.h>
-#include <linux/acpi.h>
 #include <linux/regmap.h>
 #include <linux/delay.h>
 #include <linux/iio/iio.h>
 #include <linux/iio/sysfs.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h> /* For irq_get_irq_data() */
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
 
-/* BMP280 specific registers */
-#define BMP280_REG_TEMP_XLSB           0xFC
-#define BMP280_REG_TEMP_LSB            0xFB
-#define BMP280_REG_TEMP_MSB            0xFA
-#define BMP280_REG_PRESS_XLSB          0xF9
-#define BMP280_REG_PRESS_LSB           0xF8
-#define BMP280_REG_PRESS_MSB           0xF7
-
-#define BMP280_REG_CONFIG              0xF5
-#define BMP280_REG_STATUS              0xF3
-
-#define BMP280_REG_COMP_TEMP_START     0x88
-#define BMP280_COMP_TEMP_REG_COUNT     6
-
-#define BMP280_REG_COMP_PRESS_START    0x8E
-#define BMP280_COMP_PRESS_REG_COUNT    18
-
-#define BMP280_FILTER_MASK             (BIT(4) | BIT(3) | BIT(2))
-#define BMP280_FILTER_OFF              0
-#define BMP280_FILTER_2X               BIT(2)
-#define BMP280_FILTER_4X               BIT(3)
-#define BMP280_FILTER_8X               (BIT(3) | BIT(2))
-#define BMP280_FILTER_16X              BIT(4)
-
-#define BMP280_OSRS_TEMP_MASK          (BIT(7) | BIT(6) | BIT(5))
-#define BMP280_OSRS_TEMP_SKIP          0
-#define BMP280_OSRS_TEMP_X(osrs_t)     ((osrs_t) << 5)
-#define BMP280_OSRS_TEMP_1X            BMP280_OSRS_TEMP_X(1)
-#define BMP280_OSRS_TEMP_2X            BMP280_OSRS_TEMP_X(2)
-#define BMP280_OSRS_TEMP_4X            BMP280_OSRS_TEMP_X(3)
-#define BMP280_OSRS_TEMP_8X            BMP280_OSRS_TEMP_X(4)
-#define BMP280_OSRS_TEMP_16X           BMP280_OSRS_TEMP_X(5)
-
-#define BMP280_OSRS_PRESS_MASK         (BIT(4) | BIT(3) | BIT(2))
-#define BMP280_OSRS_PRESS_SKIP         0
-#define BMP280_OSRS_PRESS_X(osrs_p)    ((osrs_p) << 2)
-#define BMP280_OSRS_PRESS_1X           BMP280_OSRS_PRESS_X(1)
-#define BMP280_OSRS_PRESS_2X           BMP280_OSRS_PRESS_X(2)
-#define BMP280_OSRS_PRESS_4X           BMP280_OSRS_PRESS_X(3)
-#define BMP280_OSRS_PRESS_8X           BMP280_OSRS_PRESS_X(4)
-#define BMP280_OSRS_PRESS_16X          BMP280_OSRS_PRESS_X(5)
-
-#define BMP280_MODE_MASK               (BIT(1) | BIT(0))
-#define BMP280_MODE_SLEEP              0
-#define BMP280_MODE_FORCED             BIT(0)
-#define BMP280_MODE_NORMAL             (BIT(1) | BIT(0))
-
-/* BMP180 specific registers */
-#define BMP180_REG_OUT_XLSB            0xF8
-#define BMP180_REG_OUT_LSB             0xF7
-#define BMP180_REG_OUT_MSB             0xF6
-
-#define BMP180_REG_CALIB_START         0xAA
-#define BMP180_REG_CALIB_COUNT         22
-
-#define BMP180_MEAS_SCO                        BIT(5)
-#define BMP180_MEAS_TEMP               (0x0E | BMP180_MEAS_SCO)
-#define BMP180_MEAS_PRESS_X(oss)       ((oss) << 6 | 0x14 | BMP180_MEAS_SCO)
-#define BMP180_MEAS_PRESS_1X           BMP180_MEAS_PRESS_X(0)
-#define BMP180_MEAS_PRESS_2X           BMP180_MEAS_PRESS_X(1)
-#define BMP180_MEAS_PRESS_4X           BMP180_MEAS_PRESS_X(2)
-#define BMP180_MEAS_PRESS_8X           BMP180_MEAS_PRESS_X(3)
-
-/* BMP180 and BMP280 common registers */
-#define BMP280_REG_CTRL_MEAS           0xF4
-#define BMP280_REG_RESET               0xE0
-#define BMP280_REG_ID                  0xD0
-
-#define BMP180_CHIP_ID                 0x55
-#define BMP280_CHIP_ID                 0x58
-#define BMP280_SOFT_RESET_VAL          0xB6
+#include "bmp280.h"
+
+/*
+ * These enums are used for indexing into the array of calibration
+ * coefficients for BMP180.
+ */
+enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };
+
+struct bmp180_calib {
+       s16 AC1;
+       s16 AC2;
+       s16 AC3;
+       u16 AC4;
+       u16 AC5;
+       u16 AC6;
+       s16 B1;
+       s16 B2;
+       s16 MB;
+       s16 MC;
+       s16 MD;
+};
 
 struct bmp280_data {
-       struct i2c_client *client;
+       struct device *dev;
        struct mutex lock;
        struct regmap *regmap;
+       struct completion done;
+       bool use_eoc;
        const struct bmp280_chip_info *chip_info;
+       struct bmp180_calib calib;
+       struct regulator *vddd;
+       struct regulator *vdda;
+       unsigned int start_up_time; /* in milliseconds */
 
        /* log of base 2 of oversampling rate */
        u8 oversampling_press;
        u8 oversampling_temp;
+       u8 oversampling_humid;
 
        /*
         * Carryover value from temperature conversion, used in pressure
@@ -112,17 +80,19 @@ struct bmp280_data {
 };
 
 struct bmp280_chip_info {
-       const struct regmap_config *regmap_config;
-
        const int *oversampling_temp_avail;
        int num_oversampling_temp_avail;
 
        const int *oversampling_press_avail;
        int num_oversampling_press_avail;
 
+       const int *oversampling_humid_avail;
+       int num_oversampling_humid_avail;
+
        int (*chip_config)(struct bmp280_data *);
        int (*read_temp)(struct bmp280_data *, int *);
        int (*read_press)(struct bmp280_data *, int *, int *);
+       int (*read_humid)(struct bmp280_data *, int *, int *);
 };
 
 /*
@@ -143,45 +113,75 @@ static const struct iio_chan_spec bmp280_channels[] = {
                .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
                                      BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
        },
+       {
+               .type = IIO_HUMIDITYRELATIVE,
+               .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+                                     BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+       },
 };
 
-static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
-{
-       switch (reg) {
-       case BMP280_REG_CONFIG:
-       case BMP280_REG_CTRL_MEAS:
-       case BMP280_REG_RESET:
-               return true;
-       default:
-               return false;
-       };
-}
+/*
+ * Returns humidity in percent, resolution is 0.01 percent. Output value of
+ * "47445" represents 47445/1024 = 46.333 %RH.
+ *
+ * Taken from BME280 datasheet, Section 4.2.3, "Compensation formula".
+ */
 
-static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+static u32 bmp280_compensate_humidity(struct bmp280_data *data,
+                                     s32 adc_humidity)
 {
-       switch (reg) {
-       case BMP280_REG_TEMP_XLSB:
-       case BMP280_REG_TEMP_LSB:
-       case BMP280_REG_TEMP_MSB:
-       case BMP280_REG_PRESS_XLSB:
-       case BMP280_REG_PRESS_LSB:
-       case BMP280_REG_PRESS_MSB:
-       case BMP280_REG_STATUS:
-               return true;
-       default:
-               return false;
+       struct device *dev = data->dev;
+       unsigned int H1, H3, tmp;
+       int H2, H4, H5, H6, ret, var;
+
+       ret = regmap_read(data->regmap, BMP280_REG_COMP_H1, &H1);
+       if (ret < 0) {
+               dev_err(dev, "failed to read H1 comp value\n");
+               return ret;
        }
-}
 
-static const struct regmap_config bmp280_regmap_config = {
-       .reg_bits = 8,
-       .val_bits = 8,
+       ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H2, &tmp, 2);
+       if (ret < 0) {
+               dev_err(dev, "failed to read H2 comp value\n");
+               return ret;
+       }
+       H2 = sign_extend32(le16_to_cpu(tmp), 15);
+
+       ret = regmap_read(data->regmap, BMP280_REG_COMP_H3, &H3);
+       if (ret < 0) {
+               dev_err(dev, "failed to read H3 comp value\n");
+               return ret;
+       }
+
+       ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H4, &tmp, 2);
+       if (ret < 0) {
+               dev_err(dev, "failed to read H4 comp value\n");
+               return ret;
+       }
+       H4 = sign_extend32(((be16_to_cpu(tmp) >> 4) & 0xff0) |
+                         (be16_to_cpu(tmp) & 0xf), 11);
+
+       ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_H5, &tmp, 2);
+       if (ret < 0) {
+               dev_err(dev, "failed to read H5 comp value\n");
+               return ret;
+       }
+       H5 = sign_extend32(((le16_to_cpu(tmp) >> 4) & 0xfff), 11);
+
+       ret = regmap_read(data->regmap, BMP280_REG_COMP_H6, &tmp);
+       if (ret < 0) {
+               dev_err(dev, "failed to read H6 comp value\n");
+               return ret;
+       }
+       H6 = sign_extend32(tmp, 7);
 
-       .max_register = BMP280_REG_TEMP_XLSB,
-       .cache_type = REGCACHE_RBTREE,
+       var = ((s32)data->t_fine) - 76800;
+       var = ((((adc_humidity << 14) - (H4 << 20) - (H5 * var)) + 16384) >> 15)
+               * (((((((var * H6) >> 10) * (((var * H3) >> 11) + 32768)) >> 10)
+               + 2097152) * H2 + 8192) >> 14);
+       var -= ((((var >> 15) * (var >> 15)) >> 7) * H1) >> 4;
 
-       .writeable_reg = bmp280_is_writeable_reg,
-       .volatile_reg = bmp280_is_volatile_reg,
+       return var >> 12;
 };
 
 /*
@@ -201,7 +201,7 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data,
        ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_TEMP_START,
                               buf, BMP280_COMP_TEMP_REG_COUNT);
        if (ret < 0) {
-               dev_err(&data->client->dev,
+               dev_err(data->dev,
                        "failed to read temperature calibration parameters\n");
                return ret;
        }
@@ -241,7 +241,7 @@ static u32 bmp280_compensate_press(struct bmp280_data *data,
        ret = regmap_bulk_read(data->regmap, BMP280_REG_COMP_PRESS_START,
                               buf, BMP280_COMP_PRESS_REG_COUNT);
        if (ret < 0) {
-               dev_err(&data->client->dev,
+               dev_err(data->dev,
                        "failed to read pressure calibration parameters\n");
                return ret;
        }
@@ -276,7 +276,7 @@ static int bmp280_read_temp(struct bmp280_data *data,
        ret = regmap_bulk_read(data->regmap, BMP280_REG_TEMP_MSB,
                               (u8 *) &tmp, 3);
        if (ret < 0) {
-               dev_err(&data->client->dev, "failed to read temperature\n");
+               dev_err(data->dev, "failed to read temperature\n");
                return ret;
        }
 
@@ -311,7 +311,7 @@ static int bmp280_read_press(struct bmp280_data *data,
        ret = regmap_bulk_read(data->regmap, BMP280_REG_PRESS_MSB,
                               (u8 *) &tmp, 3);
        if (ret < 0) {
-               dev_err(&data->client->dev, "failed to read pressure\n");
+               dev_err(data->dev, "failed to read pressure\n");
                return ret;
        }
 
@@ -324,6 +324,34 @@ static int bmp280_read_press(struct bmp280_data *data,
        return IIO_VAL_FRACTIONAL;
 }
 
+static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
+{
+       int ret;
+       __be16 tmp = 0;
+       s32 adc_humidity;
+       u32 comp_humidity;
+
+       /* Read and compensate temperature so we get a reading of t_fine. */
+       ret = bmp280_read_temp(data, NULL);
+       if (ret < 0)
+               return ret;
+
+       ret = regmap_bulk_read(data->regmap, BMP280_REG_HUMIDITY_MSB,
+                              (u8 *) &tmp, 2);
+       if (ret < 0) {
+               dev_err(data->dev, "failed to read humidity\n");
+               return ret;
+       }
+
+       adc_humidity = be16_to_cpu(tmp);
+       comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
+
+       *val = comp_humidity;
+       *val2 = 1024;
+
+       return IIO_VAL_FRACTIONAL;
+}
+
 static int bmp280_read_raw(struct iio_dev *indio_dev,
                           struct iio_chan_spec const *chan,
                           int *val, int *val2, long mask)
@@ -331,11 +359,15 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
        int ret;
        struct bmp280_data *data = iio_priv(indio_dev);
 
+       pm_runtime_get_sync(data->dev);
        mutex_lock(&data->lock);
 
        switch (mask) {
        case IIO_CHAN_INFO_PROCESSED:
                switch (chan->type) {
+               case IIO_HUMIDITYRELATIVE:
+                       ret = data->chip_info->read_humid(data, val, val2);
+                       break;
                case IIO_PRESSURE:
                        ret = data->chip_info->read_press(data, val, val2);
                        break;
@@ -349,6 +381,10 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
                break;
        case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
                switch (chan->type) {
+               case IIO_HUMIDITYRELATIVE:
+                       *val = 1 << data->oversampling_humid;
+                       ret = IIO_VAL_INT;
+                       break;
                case IIO_PRESSURE:
                        *val = 1 << data->oversampling_press;
                        ret = IIO_VAL_INT;
@@ -368,10 +404,29 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
        }
 
        mutex_unlock(&data->lock);
+       pm_runtime_mark_last_busy(data->dev);
+       pm_runtime_put_autosuspend(data->dev);
 
        return ret;
 }
 
+static int bmp280_write_oversampling_ratio_humid(struct bmp280_data *data,
+                                              int val)
+{
+       int i;
+       const int *avail = data->chip_info->oversampling_humid_avail;
+       const int n = data->chip_info->num_oversampling_humid_avail;
+
+       for (i = 0; i < n; i++) {
+               if (avail[i] == val) {
+                       data->oversampling_humid = ilog2(val);
+
+                       return data->chip_info->chip_config(data);
+               }
+       }
+       return -EINVAL;
+}
+
 static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
                                               int val)
 {
@@ -415,8 +470,12 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+               pm_runtime_get_sync(data->dev);
                mutex_lock(&data->lock);
                switch (chan->type) {
+               case IIO_HUMIDITYRELATIVE:
+                       ret = bmp280_write_oversampling_ratio_humid(data, val);
+                       break;
                case IIO_PRESSURE:
                        ret = bmp280_write_oversampling_ratio_press(data, val);
                        break;
@@ -428,6 +487,8 @@ static int bmp280_write_raw(struct iio_dev *indio_dev,
                        break;
                }
                mutex_unlock(&data->lock);
+               pm_runtime_mark_last_busy(data->dev);
+               pm_runtime_put_autosuspend(data->dev);
                break;
        default:
                return -EINVAL;
@@ -502,7 +563,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
                                 BMP280_MODE_MASK,
                                 osrs | BMP280_MODE_NORMAL);
        if (ret < 0) {
-               dev_err(&data->client->dev,
+               dev_err(data->dev,
                        "failed to write ctrl_meas register\n");
                return ret;
        }
@@ -511,7 +572,7 @@ static int bmp280_chip_config(struct bmp280_data *data)
                                 BMP280_FILTER_MASK,
                                 BMP280_FILTER_4X);
        if (ret < 0) {
-               dev_err(&data->client->dev,
+               dev_err(data->dev,
                        "failed to write config register\n");
                return ret;
        }
@@ -522,8 +583,6 @@ static int bmp280_chip_config(struct bmp280_data *data)
 static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
 
 static const struct bmp280_chip_info bmp280_chip_info = {
-       .regmap_config = &bmp280_regmap_config,
-
        .oversampling_temp_avail = bmp280_oversampling_avail,
        .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
 
@@ -535,39 +594,32 @@ static const struct bmp280_chip_info bmp280_chip_info = {
        .read_press = bmp280_read_press,
 };
 
-static bool bmp180_is_writeable_reg(struct device *dev, unsigned int reg)
+static int bme280_chip_config(struct bmp280_data *data)
 {
-       switch (reg) {
-       case BMP280_REG_CTRL_MEAS:
-       case BMP280_REG_RESET:
-               return true;
-       default:
-               return false;
-       };
-}
+       int ret = bmp280_chip_config(data);
+       u8 osrs = BMP280_OSRS_HUMIDITIY_X(data->oversampling_humid + 1);
 
-static bool bmp180_is_volatile_reg(struct device *dev, unsigned int reg)
-{
-       switch (reg) {
-       case BMP180_REG_OUT_XLSB:
-       case BMP180_REG_OUT_LSB:
-       case BMP180_REG_OUT_MSB:
-       case BMP280_REG_CTRL_MEAS:
-               return true;
-       default:
-               return false;
-       }
+       if (ret < 0)
+               return ret;
+
+       return regmap_update_bits(data->regmap, BMP280_REG_CTRL_HUMIDITY,
+                                 BMP280_OSRS_HUMIDITY_MASK, osrs);
 }
 
-static const struct regmap_config bmp180_regmap_config = {
-       .reg_bits = 8,
-       .val_bits = 8,
+static const struct bmp280_chip_info bme280_chip_info = {
+       .oversampling_temp_avail = bmp280_oversampling_avail,
+       .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+       .oversampling_press_avail = bmp280_oversampling_avail,
+       .num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
 
-       .max_register = BMP180_REG_OUT_XLSB,
-       .cache_type = REGCACHE_RBTREE,
+       .oversampling_humid_avail = bmp280_oversampling_avail,
+       .num_oversampling_humid_avail = ARRAY_SIZE(bmp280_oversampling_avail),
 
-       .writeable_reg = bmp180_is_writeable_reg,
-       .volatile_reg = bmp180_is_volatile_reg,
+       .chip_config = bme280_chip_config,
+       .read_temp = bmp280_read_temp,
+       .read_press = bmp280_read_press,
+       .read_humid = bmp280_read_humid,
 };
 
 static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
@@ -577,16 +629,32 @@ static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
        unsigned int delay_us;
        unsigned int ctrl;
 
+       if (data->use_eoc)
+               init_completion(&data->done);
+
        ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
        if (ret)
                return ret;
 
-       if (ctrl_meas == BMP180_MEAS_TEMP)
-               delay_us = 4500;
-       else
-               delay_us = conversion_time_max[data->oversampling_press];
-
-       usleep_range(delay_us, delay_us + 1000);
+       if (data->use_eoc) {
+               /*
+                * If we have a completion interrupt, use it, wait up to
+                * 100ms. The longest conversion time listed is 76.5 ms for
+                * advanced resolution mode.
+                */
+               ret = wait_for_completion_timeout(&data->done,
+                                                 1 + msecs_to_jiffies(100));
+               if (!ret)
+                       dev_err(data->dev, "timeout waiting for completion\n");
+       } else {
+               if (ctrl_meas == BMP180_MEAS_TEMP)
+                       delay_us = 4500;
+               else
+                       delay_us =
+                               conversion_time_max[data->oversampling_press];
+
+               usleep_range(delay_us, delay_us + 1000);
+       }
 
        ret = regmap_read(data->regmap, BMP280_REG_CTRL_MEAS, &ctrl);
        if (ret)
@@ -617,26 +685,6 @@ static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
        return 0;
 }
 
-/*
- * These enums are used for indexing into the array of calibration
- * coefficients for BMP180.
- */
-enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };
-
-struct bmp180_calib {
-       s16 AC1;
-       s16 AC2;
-       s16 AC3;
-       u16 AC4;
-       u16 AC5;
-       u16 AC6;
-       s16 B1;
-       s16 B2;
-       s16 MB;
-       s16 MC;
-       s16 MD;
-};
-
 static int bmp180_read_calib(struct bmp280_data *data,
                             struct bmp180_calib *calib)
 {
@@ -656,6 +704,9 @@ static int bmp180_read_calib(struct bmp280_data *data,
                        return -EIO;
        }
 
+       /* Toss the calibration data into the entropy pool */
+       add_device_randomness(buf, sizeof(buf));
+
        calib->AC1 = be16_to_cpu(buf[AC1]);
        calib->AC2 = be16_to_cpu(buf[AC2]);
        calib->AC3 = be16_to_cpu(buf[AC3]);
@@ -679,19 +730,11 @@ static int bmp180_read_calib(struct bmp280_data *data,
  */
 static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
 {
-       int ret;
        s32 x1, x2;
-       struct bmp180_calib calib;
+       struct bmp180_calib *calib = &data->calib;
 
-       ret = bmp180_read_calib(data, &calib);
-       if (ret < 0) {
-               dev_err(&data->client->dev,
-                       "failed to read calibration coefficients\n");
-               return ret;
-       }
-
-       x1 = ((adc_temp - calib.AC6) * calib.AC5) >> 15;
-       x2 = (calib.MC << 11) / (x1 + calib.MD);
+       x1 = ((adc_temp - calib->AC6) * calib->AC5) >> 15;
+       x2 = (calib->MC << 11) / (x1 + calib->MD);
        data->t_fine = x1 + x2;
 
        return (data->t_fine + 8) >> 4;
@@ -746,29 +789,21 @@ static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
  */
 static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
 {
-       int ret;
        s32 x1, x2, x3, p;
        s32 b3, b6;
        u32 b4, b7;
        s32 oss = data->oversampling_press;
-       struct bmp180_calib calib;
-
-       ret = bmp180_read_calib(data, &calib);
-       if (ret < 0) {
-               dev_err(&data->client->dev,
-                       "failed to read calibration coefficients\n");
-               return ret;
-       }
+       struct bmp180_calib *calib = &data->calib;
 
        b6 = data->t_fine - 4000;
-       x1 = (calib.B2 * (b6 * b6 >> 12)) >> 11;
-       x2 = calib.AC2 * b6 >> 11;
+       x1 = (calib->B2 * (b6 * b6 >> 12)) >> 11;
+       x2 = calib->AC2 * b6 >> 11;
        x3 = x1 + x2;
-       b3 = ((((s32)calib.AC1 * 4 + x3) << oss) + 2) / 4;
-       x1 = calib.AC3 * b6 >> 13;
-       x2 = (calib.B1 * ((b6 * b6) >> 12)) >> 16;
+       b3 = ((((s32)calib->AC1 * 4 + x3) << oss) + 2) / 4;
+       x1 = calib->AC3 * b6 >> 13;
+       x2 = (calib->B1 * ((b6 * b6) >> 12)) >> 16;
        x3 = (x1 + x2 + 2) >> 2;
-       b4 = calib.AC4 * (u32)(x3 + 32768) >> 15;
+       b4 = calib->AC4 * (u32)(x3 + 32768) >> 15;
        b7 = ((u32)adc_press - b3) * (50000 >> oss);
        if (b7 < 0x80000000)
                p = (b7 * 2) / b4;
@@ -815,8 +850,6 @@ static const int bmp180_oversampling_temp_avail[] = { 1 };
 static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
 
 static const struct bmp280_chip_info bmp180_chip_info = {
-       .regmap_config = &bmp180_regmap_config,
-
        .oversampling_temp_avail = bmp180_oversampling_temp_avail,
        .num_oversampling_temp_avail =
                ARRAY_SIZE(bmp180_oversampling_temp_avail),
@@ -830,92 +863,254 @@ static const struct bmp280_chip_info bmp180_chip_info = {
        .read_press = bmp180_read_press,
 };
 
-static int bmp280_probe(struct i2c_client *client,
-                       const struct i2c_device_id *id)
+static irqreturn_t bmp085_eoc_irq(int irq, void *d)
+{
+       struct bmp280_data *data = d;
+
+       complete(&data->done);
+
+       return IRQ_HANDLED;
+}
+
+static int bmp085_fetch_eoc_irq(struct device *dev,
+                               const char *name,
+                               int irq,
+                               struct bmp280_data *data)
+{
+       unsigned long irq_trig;
+       int ret;
+
+       irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+       if (irq_trig != IRQF_TRIGGER_RISING) {
+               dev_err(dev, "non-rising trigger given for EOC interrupt, "
+                       "trying to enforce it\n");
+               irq_trig = IRQF_TRIGGER_RISING;
+       }
+       ret = devm_request_threaded_irq(dev,
+                       irq,
+                       bmp085_eoc_irq,
+                       NULL,
+                       irq_trig,
+                       name,
+                       data);
+       if (ret) {
+               /* Bail out without IRQ but keep the driver in place */
+               dev_err(dev, "unable to request DRDY IRQ\n");
+               return 0;
+       }
+
+       data->use_eoc = true;
+       return 0;
+}
+
+int bmp280_common_probe(struct device *dev,
+                       struct regmap *regmap,
+                       unsigned int chip,
+                       const char *name,
+                       int irq)
 {
        int ret;
        struct iio_dev *indio_dev;
        struct bmp280_data *data;
        unsigned int chip_id;
+       struct gpio_desc *gpiod;
 
-       indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+       indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
        if (!indio_dev)
                return -ENOMEM;
 
        data = iio_priv(indio_dev);
        mutex_init(&data->lock);
-       data->client = client;
+       data->dev = dev;
 
-       indio_dev->dev.parent = &client->dev;
-       indio_dev->name = id->name;
+       indio_dev->dev.parent = dev;
+       indio_dev->name = name;
        indio_dev->channels = bmp280_channels;
-       indio_dev->num_channels = ARRAY_SIZE(bmp280_channels);
        indio_dev->info = &bmp280_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
 
-       switch (id->driver_data) {
+       switch (chip) {
        case BMP180_CHIP_ID:
+               indio_dev->num_channels = 2;
                data->chip_info = &bmp180_chip_info;
                data->oversampling_press = ilog2(8);
                data->oversampling_temp = ilog2(1);
+               data->start_up_time = 10;
                break;
        case BMP280_CHIP_ID:
+               indio_dev->num_channels = 2;
                data->chip_info = &bmp280_chip_info;
                data->oversampling_press = ilog2(16);
                data->oversampling_temp = ilog2(2);
+               data->start_up_time = 2;
+               break;
+       case BME280_CHIP_ID:
+               indio_dev->num_channels = 3;
+               data->chip_info = &bme280_chip_info;
+               data->oversampling_press = ilog2(16);
+               data->oversampling_humid = ilog2(16);
+               data->oversampling_temp = ilog2(2);
+               data->start_up_time = 2;
                break;
        default:
                return -EINVAL;
        }
 
-       data->regmap = devm_regmap_init_i2c(client,
-                                       data->chip_info->regmap_config);
-       if (IS_ERR(data->regmap)) {
-               dev_err(&client->dev, "failed to allocate register map\n");
-               return PTR_ERR(data->regmap);
+       /* Bring up regulators */
+       data->vddd = devm_regulator_get(dev, "vddd");
+       if (IS_ERR(data->vddd)) {
+               dev_err(dev, "failed to get VDDD regulator\n");
+               return PTR_ERR(data->vddd);
+       }
+       ret = regulator_enable(data->vddd);
+       if (ret) {
+               dev_err(dev, "failed to enable VDDD regulator\n");
+               return ret;
+       }
+       data->vdda = devm_regulator_get(dev, "vdda");
+       if (IS_ERR(data->vdda)) {
+               dev_err(dev, "failed to get VDDA regulator\n");
+               ret = PTR_ERR(data->vdda);
+               goto out_disable_vddd;
+       }
+       ret = regulator_enable(data->vdda);
+       if (ret) {
+               dev_err(dev, "failed to enable VDDA regulator\n");
+               goto out_disable_vddd;
+       }
+       /* Wait to make sure we started up properly */
+       mdelay(data->start_up_time);
+
+       /* Bring chip out of reset if there is an assigned GPIO line */
+       gpiod = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+       /* Deassert the signal */
+       if (!IS_ERR(gpiod)) {
+               dev_info(dev, "release reset\n");
+               gpiod_set_value(gpiod, 0);
        }
 
-       ret = regmap_read(data->regmap, BMP280_REG_ID, &chip_id);
+       data->regmap = regmap;
+       ret = regmap_read(regmap, BMP280_REG_ID, &chip_id);
        if (ret < 0)
-               return ret;
-       if (chip_id != id->driver_data) {
-               dev_err(&client->dev, "bad chip id.  expected %lx got %x\n",
-                       id->driver_data, chip_id);
-               return -EINVAL;
+               goto out_disable_vdda;
+       if (chip_id != chip) {
+               dev_err(dev, "bad chip id: expected %x got %x\n",
+                       chip, chip_id);
+               ret = -EINVAL;
+               goto out_disable_vdda;
        }
 
        ret = data->chip_info->chip_config(data);
        if (ret < 0)
-               return ret;
+               goto out_disable_vdda;
+
+       dev_set_drvdata(dev, indio_dev);
+
+       /*
+        * The BMP085 and BMP180 has calibration in an E2PROM, read it out
+        * at probe time. It will not change.
+        */
+       if (chip_id == BMP180_CHIP_ID) {
+               ret = bmp180_read_calib(data, &data->calib);
+               if (ret < 0) {
+                       dev_err(data->dev,
+                               "failed to read calibration coefficients\n");
+                       goto out_disable_vdda;
+               }
+       }
+
+       /*
+        * Attempt to grab an optional EOC IRQ - only the BMP085 has this
+        * however as it happens, the BMP085 shares the chip ID of BMP180
+        * so we look for an IRQ if we have that.
+        */
+       if (irq > 0 && (chip_id == BMP180_CHIP_ID)) {
+               ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
+               if (ret)
+                       goto out_disable_vdda;
+       }
+
+       /* Enable runtime PM */
+       pm_runtime_get_noresume(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+       /*
+        * Set autosuspend to two orders of magnitude larger than the
+        * start-up time.
+        */
+       pm_runtime_set_autosuspend_delay(dev, data->start_up_time * 100);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_put(dev);
+
+       ret = iio_device_register(indio_dev);
+       if (ret)
+               goto out_runtime_pm_disable;
+
 
-       return devm_iio_device_register(&client->dev, indio_dev);
+       return 0;
+
+out_runtime_pm_disable:
+       pm_runtime_get_sync(data->dev);
+       pm_runtime_put_noidle(data->dev);
+       pm_runtime_disable(data->dev);
+out_disable_vdda:
+       regulator_disable(data->vdda);
+out_disable_vddd:
+       regulator_disable(data->vddd);
+       return ret;
 }
+EXPORT_SYMBOL(bmp280_common_probe);
 
-static const struct acpi_device_id bmp280_acpi_match[] = {
-       {"BMP0280", BMP280_CHIP_ID },
-       {"BMP0180", BMP180_CHIP_ID },
-       {"BMP0085", BMP180_CHIP_ID },
-       { },
-};
-MODULE_DEVICE_TABLE(acpi, bmp280_acpi_match);
+int bmp280_common_remove(struct device *dev)
+{
+       struct iio_dev *indio_dev = dev_get_drvdata(dev);
+       struct bmp280_data *data = iio_priv(indio_dev);
 
-static const struct i2c_device_id bmp280_id[] = {
-       {"bmp280", BMP280_CHIP_ID },
-       {"bmp180", BMP180_CHIP_ID },
-       {"bmp085", BMP180_CHIP_ID },
-       { },
-};
-MODULE_DEVICE_TABLE(i2c, bmp280_id);
+       iio_device_unregister(indio_dev);
+       pm_runtime_get_sync(data->dev);
+       pm_runtime_put_noidle(data->dev);
+       pm_runtime_disable(data->dev);
+       regulator_disable(data->vdda);
+       regulator_disable(data->vddd);
+       return 0;
+}
+EXPORT_SYMBOL(bmp280_common_remove);
 
-static struct i2c_driver bmp280_driver = {
-       .driver = {
-               .name   = "bmp280",
-               .acpi_match_table = ACPI_PTR(bmp280_acpi_match),
-       },
-       .probe          = bmp280_probe,
-       .id_table       = bmp280_id,
+#ifdef CONFIG_PM
+static int bmp280_runtime_suspend(struct device *dev)
+{
+       struct bmp280_data *data = dev_get_drvdata(dev);
+       int ret;
+
+       ret = regulator_disable(data->vdda);
+       if (ret)
+               return ret;
+       return regulator_disable(data->vddd);
+}
+
+static int bmp280_runtime_resume(struct device *dev)
+{
+       struct bmp280_data *data = dev_get_drvdata(dev);
+       int ret;
+
+       ret = regulator_enable(data->vddd);
+       if (ret)
+               return ret;
+       ret = regulator_enable(data->vdda);
+       if (ret)
+               return ret;
+       msleep(data->start_up_time);
+       return data->chip_info->chip_config(data);
+}
+#endif /* CONFIG_PM */
+
+const struct dev_pm_ops bmp280_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+                               pm_runtime_force_resume)
+       SET_RUNTIME_PM_OPS(bmp280_runtime_suspend,
+                          bmp280_runtime_resume, NULL)
 };
-module_i2c_driver(bmp280_driver);
+EXPORT_SYMBOL(bmp280_dev_pm_ops);
 
 MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
 MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
new file mode 100644 (file)
index 0000000..03742b1
--- /dev/null
@@ -0,0 +1,91 @@
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include "bmp280.h"
+
+static int bmp280_i2c_probe(struct i2c_client *client,
+                           const struct i2c_device_id *id)
+{
+       struct regmap *regmap;
+       const struct regmap_config *regmap_config;
+
+       switch (id->driver_data) {
+       case BMP180_CHIP_ID:
+               regmap_config = &bmp180_regmap_config;
+               break;
+       case BMP280_CHIP_ID:
+       case BME280_CHIP_ID:
+               regmap_config = &bmp280_regmap_config;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       regmap = devm_regmap_init_i2c(client, regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(&client->dev, "failed to allocate register map\n");
+               return PTR_ERR(regmap);
+       }
+
+       return bmp280_common_probe(&client->dev,
+                                  regmap,
+                                  id->driver_data,
+                                  id->name,
+                                  client->irq);
+}
+
+static int bmp280_i2c_remove(struct i2c_client *client)
+{
+       return bmp280_common_remove(&client->dev);
+}
+
+static const struct acpi_device_id bmp280_acpi_i2c_match[] = {
+       {"BMP0280", BMP280_CHIP_ID },
+       {"BMP0180", BMP180_CHIP_ID },
+       {"BMP0085", BMP180_CHIP_ID },
+       {"BME0280", BME280_CHIP_ID },
+       { },
+};
+MODULE_DEVICE_TABLE(acpi, bmp280_acpi_i2c_match);
+
+#ifdef CONFIG_OF
+static const struct of_device_id bmp280_of_i2c_match[] = {
+       { .compatible = "bosch,bme280", .data = (void *)BME280_CHIP_ID },
+       { .compatible = "bosch,bmp280", .data = (void *)BMP280_CHIP_ID },
+       { .compatible = "bosch,bmp180", .data = (void *)BMP180_CHIP_ID },
+       { .compatible = "bosch,bmp085", .data = (void *)BMP180_CHIP_ID },
+       { },
+};
+MODULE_DEVICE_TABLE(of, bmp280_of_i2c_match);
+#else
+#define bmp280_of_i2c_match NULL
+#endif
+
+static const struct i2c_device_id bmp280_i2c_id[] = {
+       {"bmp280", BMP280_CHIP_ID },
+       {"bmp180", BMP180_CHIP_ID },
+       {"bmp085", BMP180_CHIP_ID },
+       {"bme280", BME280_CHIP_ID },
+       { },
+};
+MODULE_DEVICE_TABLE(i2c, bmp280_i2c_id);
+
+static struct i2c_driver bmp280_i2c_driver = {
+       .driver = {
+               .name   = "bmp280",
+               .acpi_match_table = ACPI_PTR(bmp280_acpi_i2c_match),
+               .of_match_table = of_match_ptr(bmp280_of_i2c_match),
+               .pm = &bmp280_dev_pm_ops,
+       },
+       .probe          = bmp280_i2c_probe,
+       .remove         = bmp280_i2c_remove,
+       .id_table       = bmp280_i2c_id,
+};
+module_i2c_driver(bmp280_i2c_driver);
+
+MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
+MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/bmp280-regmap.c b/drivers/iio/pressure/bmp280-regmap.c
new file mode 100644 (file)
index 0000000..6807113
--- /dev/null
@@ -0,0 +1,84 @@
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "bmp280.h"
+
+static bool bmp180_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case BMP280_REG_CTRL_MEAS:
+       case BMP280_REG_RESET:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool bmp180_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case BMP180_REG_OUT_XLSB:
+       case BMP180_REG_OUT_LSB:
+       case BMP180_REG_OUT_MSB:
+       case BMP280_REG_CTRL_MEAS:
+               return true;
+       default:
+               return false;
+       }
+}
+
+const struct regmap_config bmp180_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = BMP180_REG_OUT_XLSB,
+       .cache_type = REGCACHE_RBTREE,
+
+       .writeable_reg = bmp180_is_writeable_reg,
+       .volatile_reg = bmp180_is_volatile_reg,
+};
+EXPORT_SYMBOL(bmp180_regmap_config);
+
+static bool bmp280_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case BMP280_REG_CONFIG:
+       case BMP280_REG_CTRL_HUMIDITY:
+       case BMP280_REG_CTRL_MEAS:
+       case BMP280_REG_RESET:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool bmp280_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case BMP280_REG_HUMIDITY_LSB:
+       case BMP280_REG_HUMIDITY_MSB:
+       case BMP280_REG_TEMP_XLSB:
+       case BMP280_REG_TEMP_LSB:
+       case BMP280_REG_TEMP_MSB:
+       case BMP280_REG_PRESS_XLSB:
+       case BMP280_REG_PRESS_LSB:
+       case BMP280_REG_PRESS_MSB:
+       case BMP280_REG_STATUS:
+               return true;
+       default:
+               return false;
+       }
+}
+
+const struct regmap_config bmp280_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = BMP280_REG_HUMIDITY_LSB,
+       .cache_type = REGCACHE_RBTREE,
+
+       .writeable_reg = bmp280_is_writeable_reg,
+       .volatile_reg = bmp280_is_volatile_reg,
+};
+EXPORT_SYMBOL(bmp280_regmap_config);
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
new file mode 100644 (file)
index 0000000..17bc955
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * SPI interface for the BMP280 driver
+ *
+ * Inspired by the older BMP085 driver drivers/misc/bmp085-spi.c
+ */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+
+#include "bmp280.h"
+
+static int bmp280_regmap_spi_write(void *context, const void *data,
+                                   size_t count)
+{
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+       u8 buf[2];
+
+       memcpy(buf, data, 2);
+       /*
+        * The SPI register address (= full register address without bit 7) and
+        * the write command (bit7 = RW = '0')
+        */
+       buf[0] &= ~0x80;
+
+       return spi_write_then_read(spi, buf, 2, NULL, 0);
+}
+
+static int bmp280_regmap_spi_read(void *context, const void *reg,
+                                  size_t reg_size, void *val, size_t val_size)
+{
+       struct device *dev = context;
+       struct spi_device *spi = to_spi_device(dev);
+
+       return spi_write_then_read(spi, reg, reg_size, val, val_size);
+}
+
+static struct regmap_bus bmp280_regmap_bus = {
+       .write = bmp280_regmap_spi_write,
+       .read = bmp280_regmap_spi_read,
+       .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+       .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static int bmp280_spi_probe(struct spi_device *spi)
+{
+       const struct spi_device_id *id = spi_get_device_id(spi);
+       struct regmap *regmap;
+       const struct regmap_config *regmap_config;
+       int ret;
+
+       spi->bits_per_word = 8;
+       ret = spi_setup(spi);
+       if (ret < 0) {
+               dev_err(&spi->dev, "spi_setup failed!\n");
+               return ret;
+       }
+
+       switch (id->driver_data) {
+       case BMP180_CHIP_ID:
+               regmap_config = &bmp180_regmap_config;
+               break;
+       case BMP280_CHIP_ID:
+       case BME280_CHIP_ID:
+               regmap_config = &bmp280_regmap_config;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       regmap = devm_regmap_init(&spi->dev,
+                                 &bmp280_regmap_bus,
+                                 &spi->dev,
+                                 regmap_config);
+       if (IS_ERR(regmap)) {
+               dev_err(&spi->dev, "failed to allocate register map\n");
+               return PTR_ERR(regmap);
+       }
+
+       return bmp280_common_probe(&spi->dev,
+                                  regmap,
+                                  id->driver_data,
+                                  id->name,
+                                  spi->irq);
+}
+
+static int bmp280_spi_remove(struct spi_device *spi)
+{
+       return bmp280_common_remove(&spi->dev);
+}
+
+static const struct of_device_id bmp280_of_spi_match[] = {
+       { .compatible = "bosch,bmp085", },
+       { .compatible = "bosch,bmp180", },
+       { .compatible = "bosch,bmp181", },
+       { .compatible = "bosch,bmp280", },
+       { .compatible = "bosch,bme280", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, bmp280_of_spi_match);
+
+static const struct spi_device_id bmp280_spi_id[] = {
+       { "bmp180", BMP180_CHIP_ID },
+       { "bmp181", BMP180_CHIP_ID },
+       { "bmp280", BMP280_CHIP_ID },
+       { "bme280", BME280_CHIP_ID },
+       { }
+};
+MODULE_DEVICE_TABLE(spi, bmp280_spi_id);
+
+static struct spi_driver bmp280_spi_driver = {
+       .driver = {
+               .name = "bmp280",
+               .of_match_table = bmp280_of_spi_match,
+               .pm = &bmp280_dev_pm_ops,
+       },
+       .id_table = bmp280_spi_id,
+       .probe = bmp280_spi_probe,
+       .remove = bmp280_spi_remove,
+};
+module_spi_driver(bmp280_spi_driver);
+
+MODULE_DESCRIPTION("BMP280 SPI bus driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h
new file mode 100644 (file)
index 0000000..2c770e1
--- /dev/null
@@ -0,0 +1,112 @@
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* BMP280 specific registers */
+#define BMP280_REG_HUMIDITY_LSB                0xFE
+#define BMP280_REG_HUMIDITY_MSB                0xFD
+#define BMP280_REG_TEMP_XLSB           0xFC
+#define BMP280_REG_TEMP_LSB            0xFB
+#define BMP280_REG_TEMP_MSB            0xFA
+#define BMP280_REG_PRESS_XLSB          0xF9
+#define BMP280_REG_PRESS_LSB           0xF8
+#define BMP280_REG_PRESS_MSB           0xF7
+
+#define BMP280_REG_CONFIG              0xF5
+#define BMP280_REG_CTRL_MEAS           0xF4
+#define BMP280_REG_STATUS              0xF3
+#define BMP280_REG_CTRL_HUMIDITY       0xF2
+
+/* Due to non linear mapping, and data sizes we can't do a bulk read */
+#define BMP280_REG_COMP_H1             0xA1
+#define BMP280_REG_COMP_H2             0xE1
+#define BMP280_REG_COMP_H3             0xE3
+#define BMP280_REG_COMP_H4             0xE4
+#define BMP280_REG_COMP_H5             0xE5
+#define BMP280_REG_COMP_H6             0xE7
+
+#define BMP280_REG_COMP_TEMP_START     0x88
+#define BMP280_COMP_TEMP_REG_COUNT     6
+
+#define BMP280_REG_COMP_PRESS_START    0x8E
+#define BMP280_COMP_PRESS_REG_COUNT    18
+
+#define BMP280_FILTER_MASK             (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_FILTER_OFF              0
+#define BMP280_FILTER_2X               BIT(2)
+#define BMP280_FILTER_4X               BIT(3)
+#define BMP280_FILTER_8X               (BIT(3) | BIT(2))
+#define BMP280_FILTER_16X              BIT(4)
+
+#define BMP280_OSRS_HUMIDITY_MASK      (BIT(2) | BIT(1) | BIT(0))
+#define BMP280_OSRS_HUMIDITIY_X(osrs_h)        ((osrs_h) << 0)
+#define BMP280_OSRS_HUMIDITY_SKIP      0
+#define BMP280_OSRS_HUMIDITY_1X                BMP280_OSRS_HUMIDITIY_X(1)
+#define BMP280_OSRS_HUMIDITY_2X                BMP280_OSRS_HUMIDITIY_X(2)
+#define BMP280_OSRS_HUMIDITY_4X                BMP280_OSRS_HUMIDITIY_X(3)
+#define BMP280_OSRS_HUMIDITY_8X                BMP280_OSRS_HUMIDITIY_X(4)
+#define BMP280_OSRS_HUMIDITY_16X       BMP280_OSRS_HUMIDITIY_X(5)
+
+#define BMP280_OSRS_TEMP_MASK          (BIT(7) | BIT(6) | BIT(5))
+#define BMP280_OSRS_TEMP_SKIP          0
+#define BMP280_OSRS_TEMP_X(osrs_t)     ((osrs_t) << 5)
+#define BMP280_OSRS_TEMP_1X            BMP280_OSRS_TEMP_X(1)
+#define BMP280_OSRS_TEMP_2X            BMP280_OSRS_TEMP_X(2)
+#define BMP280_OSRS_TEMP_4X            BMP280_OSRS_TEMP_X(3)
+#define BMP280_OSRS_TEMP_8X            BMP280_OSRS_TEMP_X(4)
+#define BMP280_OSRS_TEMP_16X           BMP280_OSRS_TEMP_X(5)
+
+#define BMP280_OSRS_PRESS_MASK         (BIT(4) | BIT(3) | BIT(2))
+#define BMP280_OSRS_PRESS_SKIP         0
+#define BMP280_OSRS_PRESS_X(osrs_p)    ((osrs_p) << 2)
+#define BMP280_OSRS_PRESS_1X           BMP280_OSRS_PRESS_X(1)
+#define BMP280_OSRS_PRESS_2X           BMP280_OSRS_PRESS_X(2)
+#define BMP280_OSRS_PRESS_4X           BMP280_OSRS_PRESS_X(3)
+#define BMP280_OSRS_PRESS_8X           BMP280_OSRS_PRESS_X(4)
+#define BMP280_OSRS_PRESS_16X          BMP280_OSRS_PRESS_X(5)
+
+#define BMP280_MODE_MASK               (BIT(1) | BIT(0))
+#define BMP280_MODE_SLEEP              0
+#define BMP280_MODE_FORCED             BIT(0)
+#define BMP280_MODE_NORMAL             (BIT(1) | BIT(0))
+
+/* BMP180 specific registers */
+#define BMP180_REG_OUT_XLSB            0xF8
+#define BMP180_REG_OUT_LSB             0xF7
+#define BMP180_REG_OUT_MSB             0xF6
+
+#define BMP180_REG_CALIB_START         0xAA
+#define BMP180_REG_CALIB_COUNT         22
+
+#define BMP180_MEAS_SCO                        BIT(5)
+#define BMP180_MEAS_TEMP               (0x0E | BMP180_MEAS_SCO)
+#define BMP180_MEAS_PRESS_X(oss)       ((oss) << 6 | 0x14 | BMP180_MEAS_SCO)
+#define BMP180_MEAS_PRESS_1X           BMP180_MEAS_PRESS_X(0)
+#define BMP180_MEAS_PRESS_2X           BMP180_MEAS_PRESS_X(1)
+#define BMP180_MEAS_PRESS_4X           BMP180_MEAS_PRESS_X(2)
+#define BMP180_MEAS_PRESS_8X           BMP180_MEAS_PRESS_X(3)
+
+/* BMP180 and BMP280 common registers */
+#define BMP280_REG_CTRL_MEAS           0xF4
+#define BMP280_REG_RESET               0xE0
+#define BMP280_REG_ID                  0xD0
+
+#define BMP180_CHIP_ID                 0x55
+#define BMP280_CHIP_ID                 0x58
+#define BME280_CHIP_ID                 0x60
+#define BMP280_SOFT_RESET_VAL          0xB6
+
+/* Regmap configurations */
+extern const struct regmap_config bmp180_regmap_config;
+extern const struct regmap_config bmp280_regmap_config;
+
+/* Probe called from different transports */
+int bmp280_common_probe(struct device *dev,
+                       struct regmap *regmap,
+                       unsigned int chip,
+                       const char *name,
+                       int irq);
+int bmp280_common_remove(struct device *dev);
+
+/* PM ops */
+extern const struct dev_pm_ops bmp280_dev_pm_ops;
index 90f2b6e4a9203670d1d8eec61c77ced37907f18f..12f769e863555c73b7fe8f200e7f8493b09cdfea 100644 (file)
@@ -401,6 +401,7 @@ static const struct i2c_device_id hp206c_id[] = {
        {"hp206c"},
        {}
 };
+MODULE_DEVICE_TABLE(i2c, hp206c_id);
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id hp206c_acpi_match[] = {
index 01b2e0b1887881910874a43fb7efec4c4298a609..6392d7b62841024e237cc5a4d19bdb5b0fbb18b5 100644 (file)
@@ -171,7 +171,7 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
        mutex_unlock(&data->lock);
 
        iio_push_to_buffers_with_timestamp(indio_dev, buffer,
-               iio_get_time_ns());
+               iio_get_time_ns(indio_dev));
 
 done:
        iio_trigger_notify_done(indio_dev->trig);
index 76578b07bb6e6e67734cbcf0236131c612cec92c..feb41f82c64ad58dacde63c35ba7096868b37965 100644 (file)
@@ -224,7 +224,8 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p)
        if (ret < 0)
                goto err;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, buf,
+                                          iio_get_time_ns(indio_dev));
 
 err:
        iio_trigger_notify_done(indio_dev->trig);
index e68052c118e60163b023c7e55e0d86dd39793fd8..953ffbc0ef96046546c81ce5c497e59a64cc4134 100644 (file)
@@ -1,6 +1,6 @@
 /*
- * ms5637.c - Support for Measurement-Specialties ms5637 and ms8607
- *            pressure & temperature sensor
+ * ms5637.c - Support for Measurement-Specialties MS5637, MS5805
+ *            MS5837 and MS8607 pressure & temperature sensor
  *
  * Copyright (c) 2015 Measurement-Specialties
  *
  * Datasheet:
  *  http://www.meas-spec.com/downloads/MS5637-02BA03.pdf
  * Datasheet:
+ *  http://www.meas-spec.com/downloads/MS5805-02BA01.pdf
+ * Datasheet:
+ *  http://www.meas-spec.com/downloads/MS5837-30BA.pdf
+ * Datasheet:
  *  http://www.meas-spec.com/downloads/MS8607-02BA01.pdf
  */
 
@@ -170,9 +174,12 @@ static int ms5637_probe(struct i2c_client *client,
 
 static const struct i2c_device_id ms5637_id[] = {
        {"ms5637", 0},
-       {"ms8607-temppressure", 1},
+       {"ms5805", 0},
+       {"ms5837", 0},
+       {"ms8607-temppressure", 0},
        {}
 };
+MODULE_DEVICE_TABLE(i2c, ms5637_id);
 
 static struct i2c_driver ms5637_driver = {
        .probe = ms5637_probe,
index f5f41490060b95be8e38ce5fb79bcee15e0dffc1..903a21e46874327052d300708169e34146bef26d 100644 (file)
@@ -17,6 +17,7 @@
 #define LPS001WP_PRESS_DEV_NAME                "lps001wp"
 #define LPS25H_PRESS_DEV_NAME          "lps25h"
 #define LPS331AP_PRESS_DEV_NAME                "lps331ap"
+#define LPS22HB_PRESS_DEV_NAME         "lps22hb"
 
 /**
  * struct st_sensors_platform_data - default press platform data
index 92a118c3c4acf3bc1529c4350dde5154b41c4abb..55df9a75eb3a28c5786c963a5df1b7f52bf6dd65 100644 (file)
 #include <linux/iio/common/st_sensors.h>
 #include "st_pressure.h"
 
+/*
+ * About determining pressure scaling factors
+ * ------------------------------------------
+ *
+ * Datasheets specify typical pressure sensitivity so that pressure is computed
+ * according to the following equation :
+ *     pressure[mBar] = raw / sensitivity
+ * where :
+ *     raw          the 24 bits long raw sampled pressure
+ *     sensitivity  a scaling factor specified by the datasheet in LSB/mBar
+ *
+ * IIO ABI expects pressure to be expressed as kPascal, hence pressure should be
+ * computed according to :
+ *     pressure[kPascal] = pressure[mBar] / 10
+ *                       = raw / (sensitivity * 10)                          (1)
+ *
+ * Finally, st_press_read_raw() returns pressure scaling factor as an
+ * IIO_VAL_INT_PLUS_NANO with a zero integral part and "gain" as decimal part.
+ * Therefore, from (1), "gain" becomes :
+ *     gain = 10^9 / (sensitivity * 10)
+ *          = 10^8 / sensitivity
+ *
+ * About determining temperature scaling factors and offsets
+ * ---------------------------------------------------------
+ *
+ * Datasheets specify typical temperature sensitivity and offset so that
+ * temperature is computed according to the following equation :
+ *     temp[Celsius] = offset[Celsius] + (raw / sensitivity)
+ * where :
+ *     raw          the 16 bits long raw sampled temperature
+ *     offset       a constant specified by the datasheet in degree Celsius
+ *                  (sometimes zero)
+ *     sensitivity  a scaling factor specified by the datasheet in LSB/Celsius
+ *
+ * IIO ABI expects temperature to be expressed as milli degree Celsius such as
+ * user space should compute temperature according to :
+ *     temp[mCelsius] = temp[Celsius] * 10^3
+ *                    = (offset[Celsius] + (raw / sensitivity)) * 10^3
+ *                    = ((offset[Celsius] * sensitivity) + raw) *
+ *                      (10^3 / sensitivity)                                 (2)
+ *
+ * IIO ABI expects user space to apply offset and scaling factors to raw samples
+ * according to :
+ *     temp[mCelsius] = (OFFSET + raw) * SCALE
+ * where :
+ *     OFFSET an arbitrary constant exposed by device
+ *     SCALE  an arbitrary scaling factor exposed by device
+ *
+ * Matching OFFSET and SCALE with members of (2) gives :
+ *     OFFSET = offset[Celsius] * sensitivity                                (3)
+ *     SCALE  = 10^3 / sensitivity                                           (4)
+ *
+ * st_press_read_raw() returns temperature scaling factor as an
+ * IIO_VAL_FRACTIONAL with a 10^3 numerator and "gain2" as denominator.
+ * Therefore, from (3), "gain2" becomes :
+ *     gain2 = sensitivity
+ *
+ * When declared within channel, i.e. for a non zero specified offset,
+ * st_press_read_raw() will return the latter as an IIO_VAL_FRACTIONAL such as :
+ *     numerator = OFFSET * 10^3
+ *     denominator = 10^3
+ * giving from (4):
+ *     numerator = offset[Celsius] * 10^3 * sensitivity
+ *               = offset[mCelsius] * gain2
+ */
+
 #define MCELSIUS_PER_CELSIUS                   1000
 
 /* Default pressure sensitivity */
 #define ST_PRESS_LSB_PER_CELSIUS               480UL
 #define ST_PRESS_MILLI_CELSIUS_OFFSET          42500UL
 
-#define ST_PRESS_NUMBER_DATA_CHANNELS          1
-
 /* FULLSCALE */
 #define ST_PRESS_FS_AVL_1100MB                 1100
 #define ST_PRESS_FS_AVL_1260MB                 1260
 #define ST_PRESS_1_OUT_XL_ADDR                 0x28
 #define ST_TEMP_1_OUT_L_ADDR                   0x2b
 
-/* CUSTOM VALUES FOR LPS331AP SENSOR */
+/*
+ * CUSTOM VALUES FOR LPS331AP SENSOR
+ * See LPS331AP datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps331ap.pdf
+ */
 #define ST_PRESS_LPS331AP_WAI_EXP              0xbb
 #define ST_PRESS_LPS331AP_ODR_ADDR             0x20
 #define ST_PRESS_LPS331AP_ODR_MASK             0x70
 #define ST_PRESS_LPS331AP_OD_IRQ_MASK          0x40
 #define ST_PRESS_LPS331AP_MULTIREAD_BIT                true
 
-/* CUSTOM VALUES FOR LPS001WP SENSOR */
+/*
+ * CUSTOM VALUES FOR THE OBSOLETE LPS001WP SENSOR
+ */
 
 /* LPS001WP pressure resolution */
 #define ST_PRESS_LPS001WP_LSB_PER_MBAR         16UL
 #define ST_PRESS_LPS001WP_OUT_L_ADDR           0x28
 #define ST_TEMP_LPS001WP_OUT_L_ADDR            0x2a
 
-/* CUSTOM VALUES FOR LPS25H SENSOR */
+/*
+ * CUSTOM VALUES FOR LPS25H SENSOR
+ * See LPS25H datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps25h.pdf
+ */
 #define ST_PRESS_LPS25H_WAI_EXP                        0xbd
 #define ST_PRESS_LPS25H_ODR_ADDR               0x20
 #define ST_PRESS_LPS25H_ODR_MASK               0x70
 #define ST_PRESS_LPS25H_OUT_XL_ADDR            0x28
 #define ST_TEMP_LPS25H_OUT_L_ADDR              0x2b
 
+/*
+ * CUSTOM VALUES FOR LPS22HB SENSOR
+ * See LPS22HB datasheet:
+ * http://www2.st.com/resource/en/datasheet/lps22hb.pdf
+ */
+
+/* LPS22HB temperature sensitivity */
+#define ST_PRESS_LPS22HB_LSB_PER_CELSIUS       100UL
+
+#define ST_PRESS_LPS22HB_WAI_EXP               0xb1
+#define ST_PRESS_LPS22HB_ODR_ADDR              0x10
+#define ST_PRESS_LPS22HB_ODR_MASK              0x70
+#define ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL       0x01
+#define ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL      0x02
+#define ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL      0x03
+#define ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL      0x04
+#define ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL      0x05
+#define ST_PRESS_LPS22HB_PW_ADDR               0x10
+#define ST_PRESS_LPS22HB_PW_MASK               0x70
+#define ST_PRESS_LPS22HB_BDU_ADDR              0x10
+#define ST_PRESS_LPS22HB_BDU_MASK              0x02
+#define ST_PRESS_LPS22HB_DRDY_IRQ_ADDR         0x12
+#define ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK    0x04
+#define ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK    0x08
+#define ST_PRESS_LPS22HB_IHL_IRQ_ADDR          0x12
+#define ST_PRESS_LPS22HB_IHL_IRQ_MASK          0x80
+#define ST_PRESS_LPS22HB_OD_IRQ_ADDR           0x12
+#define ST_PRESS_LPS22HB_OD_IRQ_MASK           0x40
+#define ST_PRESS_LPS22HB_MULTIREAD_BIT         true
+
 static const struct iio_chan_spec st_press_1_channels[] = {
        {
                .type = IIO_PRESSURE,
-               .channel2 = IIO_NO_MOD,
                .address = ST_PRESS_1_OUT_XL_ADDR,
-               .scan_index = ST_SENSORS_SCAN_X,
+               .scan_index = 0,
                .scan_type = {
                        .sign = 'u',
                        .realbits = 24,
-                       .storagebits = 24,
+                       .storagebits = 32,
                        .endianness = IIO_LE,
                },
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
-               .modified = 0,
        },
        {
                .type = IIO_TEMP,
-               .channel2 = IIO_NO_MOD,
                .address = ST_TEMP_1_OUT_L_ADDR,
-               .scan_index = -1,
+               .scan_index = 1,
                .scan_type = {
                        .sign = 'u',
                        .realbits = 16,
@@ -148,17 +249,15 @@ static const struct iio_chan_spec st_press_1_channels[] = {
                        BIT(IIO_CHAN_INFO_RAW) |
                        BIT(IIO_CHAN_INFO_SCALE) |
                        BIT(IIO_CHAN_INFO_OFFSET),
-               .modified = 0,
        },
-       IIO_CHAN_SOFT_TIMESTAMP(1)
+       IIO_CHAN_SOFT_TIMESTAMP(2)
 };
 
 static const struct iio_chan_spec st_press_lps001wp_channels[] = {
        {
                .type = IIO_PRESSURE,
-               .channel2 = IIO_NO_MOD,
                .address = ST_PRESS_LPS001WP_OUT_L_ADDR,
-               .scan_index = ST_SENSORS_SCAN_X,
+               .scan_index = 0,
                .scan_type = {
                        .sign = 'u',
                        .realbits = 16,
@@ -168,13 +267,11 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) |
                        BIT(IIO_CHAN_INFO_SCALE),
-               .modified = 0,
        },
        {
                .type = IIO_TEMP,
-               .channel2 = IIO_NO_MOD,
                .address = ST_TEMP_LPS001WP_OUT_L_ADDR,
-               .scan_index = -1,
+               .scan_index = 1,
                .scan_type = {
                        .sign = 'u',
                        .realbits = 16,
@@ -184,9 +281,42 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) |
                        BIT(IIO_CHAN_INFO_SCALE),
-               .modified = 0,
        },
-       IIO_CHAN_SOFT_TIMESTAMP(1)
+       IIO_CHAN_SOFT_TIMESTAMP(2)
+};
+
+static const struct iio_chan_spec st_press_lps22hb_channels[] = {
+       {
+               .type = IIO_PRESSURE,
+               .address = ST_PRESS_1_OUT_XL_ADDR,
+               .scan_index = 0,
+               .scan_type = {
+                       .sign = 'u',
+                       .realbits = 24,
+                       .storagebits = 32,
+                       .endianness = IIO_LE,
+               },
+               .info_mask_separate =
+                       BIT(IIO_CHAN_INFO_RAW) |
+                       BIT(IIO_CHAN_INFO_SCALE),
+               .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+       },
+       {
+               .type = IIO_TEMP,
+               .address = ST_TEMP_1_OUT_L_ADDR,
+               .scan_index = 1,
+               .scan_type = {
+                       .sign = 's',
+                       .realbits = 16,
+                       .storagebits = 16,
+                       .endianness = IIO_LE,
+               },
+               .info_mask_separate =
+                       BIT(IIO_CHAN_INFO_RAW) |
+                       BIT(IIO_CHAN_INFO_SCALE),
+               .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+       },
+       IIO_CHAN_SOFT_TIMESTAMP(2)
 };
 
 static const struct st_sensor_settings st_press_sensors_settings[] = {
@@ -346,6 +476,59 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                .multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
                .bootime = 2,
        },
+       {
+               .wai = ST_PRESS_LPS22HB_WAI_EXP,
+               .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+               .sensors_supported = {
+                       [0] = LPS22HB_PRESS_DEV_NAME,
+               },
+               .ch = (struct iio_chan_spec *)st_press_lps22hb_channels,
+               .num_ch = ARRAY_SIZE(st_press_lps22hb_channels),
+               .odr = {
+                       .addr = ST_PRESS_LPS22HB_ODR_ADDR,
+                       .mask = ST_PRESS_LPS22HB_ODR_MASK,
+                       .odr_avl = {
+                               { 1, ST_PRESS_LPS22HB_ODR_AVL_1HZ_VAL, },
+                               { 10, ST_PRESS_LPS22HB_ODR_AVL_10HZ_VAL, },
+                               { 25, ST_PRESS_LPS22HB_ODR_AVL_25HZ_VAL, },
+                               { 50, ST_PRESS_LPS22HB_ODR_AVL_50HZ_VAL, },
+                               { 75, ST_PRESS_LPS22HB_ODR_AVL_75HZ_VAL, },
+                       },
+               },
+               .pw = {
+                       .addr = ST_PRESS_LPS22HB_PW_ADDR,
+                       .mask = ST_PRESS_LPS22HB_PW_MASK,
+                       .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+               },
+               .fs = {
+                       .fs_avl = {
+                               /*
+                                * Pressure and temperature sensitivity values
+                                * as defined in table 3 of LPS22HB datasheet.
+                                */
+                               [0] = {
+                                       .num = ST_PRESS_FS_AVL_1260MB,
+                                       .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+                                       .gain2 = ST_PRESS_LPS22HB_LSB_PER_CELSIUS,
+                               },
+                       },
+               },
+               .bdu = {
+                       .addr = ST_PRESS_LPS22HB_BDU_ADDR,
+                       .mask = ST_PRESS_LPS22HB_BDU_MASK,
+               },
+               .drdy_irq = {
+                       .addr = ST_PRESS_LPS22HB_DRDY_IRQ_ADDR,
+                       .mask_int1 = ST_PRESS_LPS22HB_DRDY_IRQ_INT1_MASK,
+                       .mask_int2 = ST_PRESS_LPS22HB_DRDY_IRQ_INT2_MASK,
+                       .addr_ihl = ST_PRESS_LPS22HB_IHL_IRQ_ADDR,
+                       .mask_ihl = ST_PRESS_LPS22HB_IHL_IRQ_MASK,
+                       .addr_od = ST_PRESS_LPS22HB_OD_IRQ_ADDR,
+                       .mask_od = ST_PRESS_LPS22HB_OD_IRQ_MASK,
+                       .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
+               },
+               .multi_read_bit = ST_PRESS_LPS22HB_MULTIREAD_BIT,
+       },
 };
 
 static int st_press_write_raw(struct iio_dev *indio_dev,
@@ -462,23 +645,30 @@ int st_press_common_probe(struct iio_dev *indio_dev)
        indio_dev->info = &press_info;
        mutex_init(&press_data->tb.buf_lock);
 
-       st_sensors_power_enable(indio_dev);
+       err = st_sensors_power_enable(indio_dev);
+       if (err)
+               return err;
 
        err = st_sensors_check_device_support(indio_dev,
                                        ARRAY_SIZE(st_press_sensors_settings),
                                        st_press_sensors_settings);
        if (err < 0)
-               return err;
-
-       press_data->num_data_channels = ST_PRESS_NUMBER_DATA_CHANNELS;
+               goto st_press_power_off;
+
+       /*
+        * Skip timestamping channel while declaring available channels to
+        * common st_sensor layer. Look at st_sensors_get_buffer_element() to
+        * see how timestamps are explicitly pushed as last samples block
+        * element.
+        */
+       press_data->num_data_channels = press_data->sensor_settings->num_ch - 1;
        press_data->multiread_bit = press_data->sensor_settings->multi_read_bit;
        indio_dev->channels = press_data->sensor_settings->ch;
        indio_dev->num_channels = press_data->sensor_settings->num_ch;
 
-       if (press_data->sensor_settings->fs.addr != 0)
-               press_data->current_fullscale =
-                       (struct st_sensor_fullscale_avl *)
-                               &press_data->sensor_settings->fs.fs_avl[0];
+       press_data->current_fullscale =
+               (struct st_sensor_fullscale_avl *)
+                       &press_data->sensor_settings->fs.fs_avl[0];
 
        press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz;
 
@@ -490,11 +680,11 @@ int st_press_common_probe(struct iio_dev *indio_dev)
 
        err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data);
        if (err < 0)
-               return err;
+               goto st_press_power_off;
 
        err = st_press_allocate_ring(indio_dev);
        if (err < 0)
-               return err;
+               goto st_press_power_off;
 
        if (irq > 0) {
                err = st_sensors_allocate_trigger(indio_dev,
@@ -517,6 +707,8 @@ st_press_device_register_error:
                st_sensors_deallocate_trigger(indio_dev);
 st_press_probe_trigger_error:
        st_press_deallocate_ring(indio_dev);
+st_press_power_off:
+       st_sensors_power_disable(indio_dev);
 
        return err;
 }
index 8fcf9766eaecbf0f3ef191e21a58e242e6419624..ed18701c68c97efe1d5604a85e6178bd097ea123 100644 (file)
@@ -32,6 +32,10 @@ static const struct of_device_id st_press_of_match[] = {
                .compatible = "st,lps331ap-press",
                .data = LPS331AP_PRESS_DEV_NAME,
        },
+       {
+               .compatible = "st,lps22hb-press",
+               .data = LPS22HB_PRESS_DEV_NAME,
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, st_press_of_match);
index 40c0692ff1de339f123d93d9056fb52134e673df..550508025af1a31dfcf48a5803fce711e12f9243 100644 (file)
@@ -50,6 +50,7 @@ static const struct spi_device_id st_press_id_table[] = {
        { LPS001WP_PRESS_DEV_NAME },
        { LPS25H_PRESS_DEV_NAME },
        { LPS331AP_PRESS_DEV_NAME },
+       { LPS22HB_PRESS_DEV_NAME },
        {},
 };
 MODULE_DEVICE_TABLE(spi, st_press_id_table);
index e2f926cdcad2acdbdd8aec50f0f479f52d2a6584..2e3a70e1b24541677d80661d9dedb7742d1f3341 100644 (file)
@@ -231,10 +231,16 @@ static void as3935_event_work(struct work_struct *work)
 {
        struct as3935_state *st;
        int val;
+       int ret;
 
        st = container_of(work, struct as3935_state, work.work);
 
-       as3935_read(st, AS3935_INT, &val);
+       ret = as3935_read(st, AS3935_INT, &val);
+       if (ret) {
+               dev_warn(&st->spi->dev, "read error\n");
+               return;
+       }
+
        val &= AS3935_INT_MASK;
 
        switch (val) {
@@ -242,7 +248,7 @@ static void as3935_event_work(struct work_struct *work)
                iio_trigger_poll(st->trig);
                break;
        case AS3935_NOISE_INT:
-               dev_warn(&st->spi->dev, "noise level is too high");
+               dev_warn(&st->spi->dev, "noise level is too high\n");
                break;
        }
 }
@@ -346,7 +352,6 @@ static int as3935_probe(struct spi_device *spi)
 
        st = iio_priv(indio_dev);
        st->spi = spi;
-       st->tune_cap = 0;
 
        spi_set_drvdata(spi, indio_dev);
        mutex_init(&st->lock);
@@ -468,4 +473,3 @@ module_spi_driver(as3935_driver);
 MODULE_AUTHOR("Matt Ranostay <mranostay@gmail.com>");
 MODULE_DESCRIPTION("AS3935 lightning sensor");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:as3935");
index 4f502386aa8629ffcc935fd09a1d360c1905413c..3141c3c161bb4335cae5bc16f40c72339c474948 100644 (file)
@@ -203,22 +203,19 @@ static int lidar_read_raw(struct iio_dev *indio_dev,
        struct lidar_data *data = iio_priv(indio_dev);
        int ret = -EINVAL;
 
-       mutex_lock(&indio_dev->mlock);
-
-       if (iio_buffer_enabled(indio_dev) && mask == IIO_CHAN_INFO_RAW) {
-               ret = -EBUSY;
-               goto error_busy;
-       }
-
        switch (mask) {
        case IIO_CHAN_INFO_RAW: {
                u16 reg;
 
+               if (iio_device_claim_direct_mode(indio_dev))
+                       return -EBUSY;
+
                ret = lidar_get_measurement(data, &reg);
                if (!ret) {
                        *val = reg;
                        ret = IIO_VAL_INT;
                }
+               iio_device_release_direct_mode(indio_dev);
                break;
        }
        case IIO_CHAN_INFO_SCALE:
@@ -228,9 +225,6 @@ static int lidar_read_raw(struct iio_dev *indio_dev,
                break;
        }
 
-error_busy:
-       mutex_unlock(&indio_dev->mlock);
-
        return ret;
 }
 
@@ -244,7 +238,7 @@ static irqreturn_t lidar_trigger_handler(int irq, void *private)
        ret = lidar_get_measurement(data, data->buffer);
        if (!ret) {
                iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
-                                                  iio_get_time_ns());
+                                                  iio_get_time_ns(indio_dev));
        } else if (ret != -EINVAL) {
                dev_err(&data->client->dev, "cannot read LIDAR measurement");
        }
index 66cd09a18786a9123c7eeeb82c0e503ae9dee5e6..1d74b3aafeedbcc7a7c44dc8abd8505424b17932 100644 (file)
@@ -492,7 +492,7 @@ static void sx9500_push_events(struct iio_dev *indio_dev)
                dir = new_prox ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
                ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
                                          IIO_EV_TYPE_THRESH, dir);
-               iio_push_event(indio_dev, ev, iio_get_time_ns());
+               iio_push_event(indio_dev, ev, iio_get_time_ns(indio_dev));
                data->prox_stat[chan] = new_prox;
        }
 }
@@ -669,7 +669,7 @@ static irqreturn_t sx9500_trigger_handler(int irq, void *private)
        }
 
        iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
-                                          iio_get_time_ns());
+                                          iio_get_time_ns(indio_dev));
 
 out:
        mutex_unlock(&data->mutex);
index ab6fe8f6f2d15da7aab32f379dc00623b6d87fcc..c0a19a00038716258a255223fe5709c993e509dc 100644 (file)
@@ -174,6 +174,7 @@ static const struct i2c_device_id tsys02d_id[] = {
        {"tsys02d", 0},
        {}
 };
+MODULE_DEVICE_TABLE(i2c, tsys02d_id);
 
 static struct i2c_driver tsys02d_driver = {
        .probe = tsys02d_probe,
index 519e6772f6f57d53cd2ea6065efa88bafe073766..809b2e7d58faf5435f6ca1d70f5c183f5d526eb2 100644 (file)
@@ -24,6 +24,18 @@ config IIO_INTERRUPT_TRIGGER
          To compile this driver as a module, choose M here: the
          module will be called iio-trig-interrupt.
 
+config IIO_TIGHTLOOP_TRIGGER
+       tristate "A kthread based hammering loop trigger"
+       depends on IIO_SW_TRIGGER
+       help
+         An experimental trigger, used to allow sensors to be sampled as fast
+         as possible under the limitations of whatever else is going on.
+         Uses a tight loop in a kthread.  Will only work with lower half only
+         trigger consumers.
+
+         To compile this driver as a module, choose M here: the
+         module will be called iio-trig-loop.
+
 config IIO_SYSFS_TRIGGER
        tristate "SYSFS trigger"
        depends on SYSFS
index fe06eb564367ee0360d378133d22e8c13477e464..aab4dc23303dbd554c45bb6a468d95a6e7a5b19d 100644 (file)
@@ -7,3 +7,4 @@
 obj-$(CONFIG_IIO_HRTIMER_TRIGGER) += iio-trig-hrtimer.o
 obj-$(CONFIG_IIO_INTERRUPT_TRIGGER) += iio-trig-interrupt.o
 obj-$(CONFIG_IIO_SYSFS_TRIGGER) += iio-trig-sysfs.o
+obj-$(CONFIG_IIO_TIGHTLOOP_TRIGGER) += iio-trig-loop.o
diff --git a/drivers/iio/trigger/iio-trig-loop.c b/drivers/iio/trigger/iio-trig-loop.c
new file mode 100644 (file)
index 0000000..dc6be28
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2016 Jonathan Cameron <jic23@kernel.org>
+ *
+ * Licensed under the GPL-2.
+ *
+ * Based on a mashup of the hrtimer trigger and continuous sampling proposal of
+ * Gregor Boirie <gregor.boirie@parrot.com>
+ *
+ * Note this is still rather experimental and may eat babies.
+ *
+ * Todo
+ * * Protect against connection of devices that 'need' the top half
+ *   handler.
+ * * Work out how to run top half handlers in this context if it is
+ *   safe to do so (timestamp grabbing for example)
+ *
+ * Tested against a max1363. Used about 33% cpu for the thread and 20%
+ * for generic_buffer piping to /dev/null. Watermark set at 64 on a 128
+ * element kfifo buffer.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/irq_work.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/sw_trigger.h>
+
+struct iio_loop_info {
+       struct iio_sw_trigger swt;
+       struct task_struct *task;
+};
+
+static struct config_item_type iio_loop_type = {
+       .ct_owner = THIS_MODULE,
+};
+
+static int iio_loop_thread(void *data)
+{
+       struct iio_trigger *trig = data;
+
+       set_freezable();
+
+       do {
+               iio_trigger_poll_chained(trig);
+       } while (likely(!kthread_freezable_should_stop(NULL)));
+
+       return 0;
+}
+
+static int iio_loop_trigger_set_state(struct iio_trigger *trig, bool state)
+{
+       struct iio_loop_info *loop_trig = iio_trigger_get_drvdata(trig);
+
+       if (state) {
+               loop_trig->task = kthread_run(iio_loop_thread,
+                                             trig, trig->name);
+               if (unlikely(IS_ERR(loop_trig->task))) {
+                       dev_err(&trig->dev,
+                               "failed to create trigger loop thread\n");
+                       return PTR_ERR(loop_trig->task);
+               }
+       } else {
+               kthread_stop(loop_trig->task);
+       }
+
+       return 0;
+}
+
+static const struct iio_trigger_ops iio_loop_trigger_ops = {
+       .set_trigger_state = iio_loop_trigger_set_state,
+       .owner = THIS_MODULE,
+};
+
+static struct iio_sw_trigger *iio_trig_loop_probe(const char *name)
+{
+       struct iio_loop_info *trig_info;
+       int ret;
+
+       trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL);
+       if (!trig_info)
+               return ERR_PTR(-ENOMEM);
+
+       trig_info->swt.trigger = iio_trigger_alloc("%s", name);
+       if (!trig_info->swt.trigger) {
+               ret = -ENOMEM;
+               goto err_free_trig_info;
+       }
+
+       iio_trigger_set_drvdata(trig_info->swt.trigger, trig_info);
+       trig_info->swt.trigger->ops = &iio_loop_trigger_ops;
+
+       ret = iio_trigger_register(trig_info->swt.trigger);
+       if (ret)
+               goto err_free_trigger;
+
+       iio_swt_group_init_type_name(&trig_info->swt, name, &iio_loop_type);
+
+       return &trig_info->swt;
+
+err_free_trigger:
+       iio_trigger_free(trig_info->swt.trigger);
+err_free_trig_info:
+       kfree(trig_info);
+
+       return ERR_PTR(ret);
+}
+
+static int iio_trig_loop_remove(struct iio_sw_trigger *swt)
+{
+       struct iio_loop_info *trig_info;
+
+       trig_info = iio_trigger_get_drvdata(swt->trigger);
+
+       iio_trigger_unregister(swt->trigger);
+       iio_trigger_free(swt->trigger);
+       kfree(trig_info);
+
+       return 0;
+}
+
+static const struct iio_sw_trigger_ops iio_trig_loop_ops = {
+       .probe = iio_trig_loop_probe,
+       .remove = iio_trig_loop_remove,
+};
+
+static struct iio_sw_trigger_type iio_trig_loop = {
+       .name = "loop",
+       .owner = THIS_MODULE,
+       .ops = &iio_trig_loop_ops,
+};
+
+module_iio_sw_trigger_driver(iio_trig_loop);
+
+MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
+MODULE_DESCRIPTION("Loop based trigger for the iio subsystem");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:iio-trig-loop");
index a5793c8f15901483e24e24393173ea8dbee6b191..60df4f8e81bed10ac8e9b6149ec4c3ef5c6abf9f 100644 (file)
@@ -530,6 +530,7 @@ static PORT_PMA_ATTR(port_xmit_data             , 12, 32, 192);
 static PORT_PMA_ATTR(port_rcv_data                 , 13, 32, 224);
 static PORT_PMA_ATTR(port_xmit_packets             , 14, 32, 256);
 static PORT_PMA_ATTR(port_rcv_packets              , 15, 32, 288);
+static PORT_PMA_ATTR(port_xmit_wait                ,  0, 32, 320);
 
 /*
  * Counters added by extended set
@@ -560,6 +561,7 @@ static struct attribute *pma_attrs[] = {
        &port_pma_attr_port_rcv_data.attr.attr,
        &port_pma_attr_port_xmit_packets.attr.attr,
        &port_pma_attr_port_rcv_packets.attr.attr,
+       &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
 };
 
@@ -579,6 +581,7 @@ static struct attribute *pma_attrs_ext[] = {
        &port_pma_attr_ext_port_xmit_data.attr.attr,
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
+       &port_pma_attr_port_xmit_wait.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_rcv_packets.attr.attr,
        &port_pma_attr_ext_unicast_xmit_packets.attr.attr,
@@ -604,6 +607,7 @@ static struct attribute *pma_attrs_noietf[] = {
        &port_pma_attr_ext_port_rcv_data.attr.attr,
        &port_pma_attr_ext_port_xmit_packets.attr.attr,
        &port_pma_attr_ext_port_rcv_packets.attr.attr,
+       &port_pma_attr_port_xmit_wait.attr.attr,
        NULL
 };
 
index f5de851780555a5c9f06d91c2eb9bb2ff14c666e..dad4d0ebbdffb45e2c667cc95b86aabf39b53a87 100644 (file)
@@ -14113,8 +14113,14 @@ static int init_asic_data(struct hfi1_devdata *dd)
 {
        unsigned long flags;
        struct hfi1_devdata *tmp, *peer = NULL;
+       struct hfi1_asic_data *asic_data;
        int ret = 0;
 
+       /* pre-allocate the asic structure in case we are the first device */
+       asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
+       if (!asic_data)
+               return -ENOMEM;
+
        spin_lock_irqsave(&hfi1_devs_lock, flags);
        /* Find our peer device */
        list_for_each_entry(tmp, &hfi1_dev_list, list) {
@@ -14126,18 +14132,14 @@ static int init_asic_data(struct hfi1_devdata *dd)
        }
 
        if (peer) {
+               /* use already allocated structure */
                dd->asic_data = peer->asic_data;
+               kfree(asic_data);
        } else {
-               dd->asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
-               if (!dd->asic_data) {
-                       ret = -ENOMEM;
-                       goto done;
-               }
+               dd->asic_data = asic_data;
                mutex_init(&dd->asic_data->asic_resource_mutex);
        }
        dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
-
-done:
        spin_unlock_irqrestore(&hfi1_devs_lock, flags);
        return ret;
 }
index 1e503ad0bebb764ee1e05462604538e6324002f8..be91f6fa1c87b16fe4f31affd6c12848b1951792 100644 (file)
@@ -678,8 +678,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
        u32 tlen = packet->tlen;
        struct rvt_qp *qp = packet->qp;
        bool has_grh = rcv_flags & HFI1_HAS_GRH;
-       bool sc4_bit = has_sc4_bit(packet);
-       u8 sc;
+       u8 sc5 = hdr2sc((struct hfi1_message_header *)hdr, packet->rhf);
        u32 bth1;
        int is_mcast;
        struct ib_grh *grh = NULL;
@@ -697,10 +696,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
                 */
                struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                u32 lqpn =  be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
-               u8 sl, sc5;
+               u8 sl;
 
-               sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-               sc5 |= sc4_bit;
                sl = ibp->sc_to_sl[sc5];
 
                process_becn(ppd, sl, 0, lqpn, 0, IB_CC_SVCTYPE_UD);
@@ -717,10 +714,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
 
        if (!is_mcast && (opcode != IB_OPCODE_CNP) && bth1 & HFI1_FECN_SMASK) {
                u16 slid = be16_to_cpu(hdr->lrh[3]);
-               u8 sc5;
-
-               sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-               sc5 |= sc4_bit;
 
                return_cnp(ibp, qp, src_qp, pkey, dlid, slid, sc5, grh);
        }
@@ -745,10 +738,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
                if (qp->ibqp.qp_num > 1) {
                        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
                        u16 slid;
-                       u8 sc5;
-
-                       sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-                       sc5 |= sc4_bit;
 
                        slid = be16_to_cpu(hdr->lrh[3]);
                        if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
@@ -790,10 +779,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
                /* Received on QP0, and so by definition, this is an SMP */
                struct opa_smp *smp = (struct opa_smp *)data;
                u16 slid = be16_to_cpu(hdr->lrh[3]);
-               u8 sc5;
-
-               sc5 = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-               sc5 |= sc4_bit;
 
                if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
                        goto drop;
@@ -890,9 +875,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
        }
 
        wc.slid = be16_to_cpu(hdr->lrh[3]);
-       sc = (be16_to_cpu(hdr->lrh[0]) >> 12) & 0xf;
-       sc |= sc4_bit;
-       wc.sl = ibp->sc_to_sl[sc];
+       wc.sl = ibp->sc_to_sl[sc5];
 
        /*
         * Save the LMC lower bits if the destination LID is a unicast LID.
index c963cad92f5a8eb061af74ae964e9222115e1815..6e9081380a276cbb78da8687820b9c7092f684af 100644 (file)
@@ -600,8 +600,7 @@ static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
        cqp_init_info.scratch_array = cqp->scratch_array;
        status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
        if (status) {
-               i40iw_pr_err("cqp init status %d maj_err %d min_err %d\n",
-                            status, maj_err, min_err);
+               i40iw_pr_err("cqp init status %d\n", status);
                goto exit;
        }
        status = dev->cqp_ops->cqp_create(dev->cqp, true, &maj_err, &min_err);
index 33959ed14563e0f43e5209320ba0fd5ff0338abb..283b64c942eebfea6378ee8f4ad5206e6f549876 100644 (file)
@@ -1474,6 +1474,7 @@ static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr
        info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;
        info->pd_id = iwpd->sc_pd.pd_id;
        info->total_len = iwmr->length;
+       info->remote_access = true;
        cqp_info->cqp_cmd = OP_ALLOC_STAG;
        cqp_info->post_sq = 1;
        cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev;
index 3438e98c145a9530058408c98524f75e543f658a..a529a4535457217e761fd1e8f8b0748cb48a5498 100644 (file)
@@ -1431,6 +1431,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
        int ep_irq_in_idx;
        int i, error;
 
+       if (intf->cur_altsetting->desc.bNumEndpoints != 2)
+               return -ENODEV;
+
        for (i = 0; xpad_device[i].idVendor; i++) {
                if ((le16_to_cpu(udev->descriptor.idVendor) == xpad_device[i].idVendor) &&
                    (le16_to_cpu(udev->descriptor.idProduct) == xpad_device[i].idProduct))
index b368b0515c5a3aa26161dd7893a1c22c2678e4b2..253df96be4276cc9196a6c0ab25b04f838d061ac 100644 (file)
@@ -157,11 +157,11 @@ static int rmi_function_match(struct device *dev, struct device_driver *drv)
 static void rmi_function_of_probe(struct rmi_function *fn)
 {
        char of_name[9];
+       struct device_node *node = fn->rmi_dev->xport->dev->of_node;
 
        snprintf(of_name, sizeof(of_name), "rmi4-f%02x",
                fn->fd.function_number);
-       fn->dev.of_node = of_find_node_by_name(
-                               fn->rmi_dev->xport->dev->of_node, of_name);
+       fn->dev.of_node = of_get_child_by_name(node, of_name);
 }
 #else
 static inline void rmi_function_of_probe(struct rmi_function *fn)
index 8dd3fb5e1f9433f013f18c51728205c425a61560..88e91559c84e2dcad267a0e20fd4e65bc56238bb 100644 (file)
@@ -66,7 +66,7 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
        struct rmi_device *rmi_dev = fn->rmi_dev;
        int ret;
        int offset;
-       u8 buf[14];
+       u8 buf[15];
        int pitch_x = 0;
        int pitch_y = 0;
        int clip_x_low = 0;
@@ -86,9 +86,10 @@ static int rmi_f12_read_sensor_tuning(struct f12_data *f12)
 
        offset = rmi_register_desc_calc_reg_offset(&f12->control_reg_desc, 8);
 
-       if (item->reg_size > 14) {
-               dev_err(&fn->dev, "F12 control8 should be 14 bytes, not: %ld\n",
-                       item->reg_size);
+       if (item->reg_size > sizeof(buf)) {
+               dev_err(&fn->dev,
+                       "F12 control8 should be no bigger than %zd bytes, not: %ld\n",
+                       sizeof(buf), item->reg_size);
                return -ENODEV;
        }
 
index 3c3dd78303be0e68f1373cb01d6609be6a1efbc7..fed73eeb47b3c6e263ca2e0415d9b385cc2f4854 100644 (file)
@@ -118,6 +118,13 @@ static int ts4800_parse_dt(struct platform_device *pdev,
                return -ENODEV;
        }
 
+       ts->regmap = syscon_node_to_regmap(syscon_np);
+       of_node_put(syscon_np);
+       if (IS_ERR(ts->regmap)) {
+               dev_err(dev, "cannot get parent's regmap\n");
+               return PTR_ERR(ts->regmap);
+       }
+
        error = of_property_read_u32_index(np, "syscon", 1, &reg);
        if (error < 0) {
                dev_err(dev, "no offset in syscon\n");
@@ -134,12 +141,6 @@ static int ts4800_parse_dt(struct platform_device *pdev,
 
        ts->bit = BIT(bit);
 
-       ts->regmap = syscon_node_to_regmap(syscon_np);
-       if (IS_ERR(ts->regmap)) {
-               dev_err(dev, "cannot get parent's regmap\n");
-               return PTR_ERR(ts->regmap);
-       }
-
        return 0;
 }
 
index 7295c198aa086be8630b3a77e202415649c6937b..6fe55d598facac3a05114e57e03b92b9d162eb87 100644 (file)
 #include <linux/regmap.h>
 #include "tsc200x-core.h"
 
+static const struct input_id tsc2004_input_id = {
+       .bustype = BUS_I2C,
+       .product = 2004,
+};
+
 static int tsc2004_cmd(struct device *dev, u8 cmd)
 {
        u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -42,7 +47,7 @@ static int tsc2004_probe(struct i2c_client *i2c,
                         const struct i2c_device_id *id)
 
 {
-       return tsc200x_probe(&i2c->dev, i2c->irq, BUS_I2C,
+       return tsc200x_probe(&i2c->dev, i2c->irq, &tsc2004_input_id,
                             devm_regmap_init_i2c(i2c, &tsc200x_regmap_config),
                             tsc2004_cmd);
 }
index b9f593dfd2ef8368223e5d7d7d9ca06856afcc8d..f2c5f0e47f77dd6ab177adec2bb102ed0da5dd08 100644 (file)
 #include <linux/regmap.h>
 #include "tsc200x-core.h"
 
+static const struct input_id tsc2005_input_id = {
+       .bustype = BUS_SPI,
+       .product = 2005,
+};
+
 static int tsc2005_cmd(struct device *dev, u8 cmd)
 {
        u8 tx = TSC200X_CMD | TSC200X_CMD_12BIT | cmd;
@@ -62,7 +67,7 @@ static int tsc2005_probe(struct spi_device *spi)
        if (error)
                return error;
 
-       return tsc200x_probe(&spi->dev, spi->irq, BUS_SPI,
+       return tsc200x_probe(&spi->dev, spi->irq, &tsc2005_input_id,
                             devm_regmap_init_spi(spi, &tsc200x_regmap_config),
                             tsc2005_cmd);
 }
index 15240c1ee850abd4f5099db9a9e21a4b8ecf725b..dfa7f1c4f5453bd9d1544badc24548eab0851361 100644 (file)
@@ -450,7 +450,7 @@ static void tsc200x_close(struct input_dev *input)
        mutex_unlock(&ts->mutex);
 }
 
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
                  struct regmap *regmap,
                  int (*tsc200x_cmd)(struct device *dev, u8 cmd))
 {
@@ -547,9 +547,18 @@ int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
        snprintf(ts->phys, sizeof(ts->phys),
                 "%s/input-ts", dev_name(dev));
 
-       input_dev->name = "TSC200X touchscreen";
+       if (tsc_id->product == 2004) {
+               input_dev->name = "TSC200X touchscreen";
+       } else {
+               input_dev->name = devm_kasprintf(dev, GFP_KERNEL,
+                                                "TSC%04d touchscreen",
+                                                tsc_id->product);
+               if (!input_dev->name)
+                       return -ENOMEM;
+       }
+
        input_dev->phys = ts->phys;
-       input_dev->id.bustype = bustype;
+       input_dev->id = *tsc_id;
        input_dev->dev.parent = dev;
        input_dev->evbit[0] = BIT(EV_ABS) | BIT(EV_KEY);
        input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
index 7a482d1026148c692988172773af1c3f2ad9e57e..49a63a3c684094a331a7465dda370f1a2f0fcb21 100644 (file)
@@ -70,7 +70,7 @@
 extern const struct regmap_config tsc200x_regmap_config;
 extern const struct dev_pm_ops tsc200x_pm_ops;
 
-int tsc200x_probe(struct device *dev, int irq, __u16 bustype,
+int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
                  struct regmap *regmap,
                  int (*tsc200x_cmd)(struct device *dev, u8 cmd));
 int tsc200x_remove(struct device *dev);
index 0c9191cf324d491ab25117ab654c6f1bf91375de..b6fc4bde79ded75430725f97f72d7a9c3bae2c6b 100644 (file)
@@ -155,6 +155,7 @@ static void parse_multi_touch(struct w8001 *w8001)
                bool touch = data[0] & (1 << i);
 
                input_mt_slot(dev, i);
+               input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch);
                if (touch) {
                        x = (data[6 * i + 1] << 7) | data[6 * i + 2];
                        y = (data[6 * i + 3] << 7) | data[6 * i + 4];
@@ -522,6 +523,8 @@ static int w8001_setup_touch(struct w8001 *w8001, char *basename,
                                        0, touch.x, 0, 0);
                input_set_abs_params(dev, ABS_MT_POSITION_Y,
                                        0, touch.y, 0, 0);
+               input_set_abs_params(dev, ABS_MT_TOOL_TYPE,
+                                       0, MT_TOOL_MAX, 0, 0);
 
                strlcat(basename, " 2FG", basename_sz);
                if (w8001->max_pen_x && w8001->max_pen_y)
index d091defc34260e23d04969f2c93445342387da49..59741ead7e158ca4e7b0d6506a0529c03c3fe412 100644 (file)
@@ -1568,13 +1568,23 @@ static int __init amd_iommu_init_pci(void)
                        break;
        }
 
+       /*
+        * Order is important here to make sure any unity map requirements are
+        * fulfilled. The unity mappings are created and written to the device
+        * table during the amd_iommu_init_api() call.
+        *
+        * After that we call init_device_table_dma() to make sure any
+        * uninitialized DTE will block DMA, and in the end we flush the caches
+        * of all IOMMUs to make sure the changes to the device table are
+        * active.
+        */
+       ret = amd_iommu_init_api();
+
        init_device_table_dma();
 
        for_each_iommu(iommu)
                iommu_flush_all_caches(iommu);
 
-       ret = amd_iommu_init_api();
-
        if (!ret)
                print_iommu_info();
 
index cfe410eedaf0b9e7188ca462a4072443b3162bac..323dac9900ba3cc0a0c1351dfc39de86b4cb06ab 100644 (file)
@@ -4602,13 +4602,13 @@ static void free_all_cpu_cached_iovas(unsigned int cpu)
        for (i = 0; i < g_num_of_iommus; i++) {
                struct intel_iommu *iommu = g_iommus[i];
                struct dmar_domain *domain;
-               u16 did;
+               int did;
 
                if (!iommu)
                        continue;
 
                for (did = 0; did < cap_ndoms(iommu->cap); did++) {
-                       domain = get_iommu_domain(iommu, did);
+                       domain = get_iommu_domain(iommu, (u16)did);
 
                        if (!domain)
                                continue;
index f4dff5665e4eb3fc376171c4ab06471e9e6b0167..3786d0f2197227d7cfbe9fba3762a6d4b50b5703 100644 (file)
@@ -718,7 +718,7 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 
        spin_lock_irqsave(&gic_lock, flags);
        gic_map_to_pin(intr, gic_cpu_pin);
-       gic_map_to_vpe(intr, vpe);
+       gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
        for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
                clear_bit(intr, pcpu_masks[i].pcpu_mask);
        set_bit(intr, pcpu_masks[vpe].pcpu_mask);
@@ -959,7 +959,7 @@ int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
        switch (bus_token) {
        case DOMAIN_BUS_IPI:
                is_ipi = d->bus_token == bus_token;
-               return to_of_node(d->fwnode) == node && is_ipi;
+               return (!node || to_of_node(d->fwnode) == node) && is_ipi;
                break;
        default:
                return 0;
index beb2841ceae58bbb69d65d99bf3697ee6f5dc686..3f1ab4986cfc507d2cf70a451cbbf2962deac2b8 100644 (file)
@@ -779,11 +779,31 @@ static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = {
                        V4L2_DV_BT_CAP_CUSTOM)
 };
 
-static inline const struct v4l2_dv_timings_cap *
-adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd)
+/*
+ * Return the DV timings capabilities for the requested sink pad. As a special
+ * case, pad value -1 returns the capabilities for the currently selected input.
+ */
+static const struct v4l2_dv_timings_cap *
+adv76xx_get_dv_timings_cap(struct v4l2_subdev *sd, int pad)
 {
-       return is_digital_input(sd) ? &adv76xx_timings_cap_digital :
-                                     &adv7604_timings_cap_analog;
+       if (pad == -1) {
+               struct adv76xx_state *state = to_state(sd);
+
+               pad = state->selected_input;
+       }
+
+       switch (pad) {
+       case ADV76XX_PAD_HDMI_PORT_A:
+       case ADV7604_PAD_HDMI_PORT_B:
+       case ADV7604_PAD_HDMI_PORT_C:
+       case ADV7604_PAD_HDMI_PORT_D:
+               return &adv76xx_timings_cap_digital;
+
+       case ADV7604_PAD_VGA_RGB:
+       case ADV7604_PAD_VGA_COMP:
+       default:
+               return &adv7604_timings_cap_analog;
+       }
 }
 
 
@@ -1329,7 +1349,7 @@ static int stdi2dv_timings(struct v4l2_subdev *sd,
                const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
 
                if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
-                                          adv76xx_get_dv_timings_cap(sd),
+                                          adv76xx_get_dv_timings_cap(sd, -1),
                                           adv76xx_check_dv_timings, NULL))
                        continue;
                if (vtotal(bt) != stdi->lcf + 1)
@@ -1430,18 +1450,22 @@ static int adv76xx_enum_dv_timings(struct v4l2_subdev *sd,
                return -EINVAL;
 
        return v4l2_enum_dv_timings_cap(timings,
-               adv76xx_get_dv_timings_cap(sd), adv76xx_check_dv_timings, NULL);
+               adv76xx_get_dv_timings_cap(sd, timings->pad),
+               adv76xx_check_dv_timings, NULL);
 }
 
 static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
                        struct v4l2_dv_timings_cap *cap)
 {
        struct adv76xx_state *state = to_state(sd);
+       unsigned int pad = cap->pad;
 
        if (cap->pad >= state->source_pad)
                return -EINVAL;
 
-       *cap = *adv76xx_get_dv_timings_cap(sd);
+       *cap = *adv76xx_get_dv_timings_cap(sd, pad);
+       cap->pad = pad;
+
        return 0;
 }
 
@@ -1450,9 +1474,9 @@ static int adv76xx_dv_timings_cap(struct v4l2_subdev *sd,
 static void adv76xx_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
                struct v4l2_dv_timings *timings)
 {
-       v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd),
-                       is_digital_input(sd) ? 250000 : 1000000,
-                       adv76xx_check_dv_timings, NULL);
+       v4l2_find_dv_timings_cap(timings, adv76xx_get_dv_timings_cap(sd, -1),
+                                is_digital_input(sd) ? 250000 : 1000000,
+                                adv76xx_check_dv_timings, NULL);
 }
 
 static unsigned int adv7604_read_hdmi_pixelclock(struct v4l2_subdev *sd)
@@ -1620,7 +1644,7 @@ static int adv76xx_s_dv_timings(struct v4l2_subdev *sd,
 
        bt = &timings->bt;
 
-       if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd),
+       if (!v4l2_valid_dv_timings(timings, adv76xx_get_dv_timings_cap(sd, -1),
                                   adv76xx_check_dv_timings, NULL))
                return -ERANGE;
 
index 87c12930416f565a118c145501e54df1a6b13cc7..92d9d4214c3adb326dc1a23d0c2c43840172611c 100644 (file)
@@ -1072,7 +1072,7 @@ static int airspy_probe(struct usb_interface *intf,
        if (ret) {
                dev_err(s->dev, "Failed to register as video device (%d)\n",
                                ret);
-               goto err_unregister_v4l2_dev;
+               goto err_free_controls;
        }
        dev_info(s->dev, "Registered as %s\n",
                        video_device_node_name(&s->vdev));
@@ -1081,7 +1081,6 @@ static int airspy_probe(struct usb_interface *intf,
 
 err_free_controls:
        v4l2_ctrl_handler_free(&s->hdl);
-err_unregister_v4l2_dev:
        v4l2_device_unregister(&s->v4l2_dev);
 err_free_mem:
        kfree(s);
index 28e5be2c2eef22f75a1a902131ee987bb927ad0d..528390f33b5336842491d039658a87f11b3a75f2 100644 (file)
@@ -2171,7 +2171,7 @@ static int v4l_cropcap(const struct v4l2_ioctl_ops *ops,
         * The determine_valid_ioctls() call already should ensure
         * that this can never happen, but just in case...
         */
-       if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_cropcap))
+       if (WARN_ON(!ops->vidioc_cropcap && !ops->vidioc_g_selection))
                return -ENOTTY;
 
        if (ops->vidioc_cropcap)
index b2fb6dbffcef0c4999439287b3e1c1a10bdcf264..4387ccb79e642c34f8d3b6c2a39ba4daae15e325 100644 (file)
@@ -57,3 +57,17 @@ obj-$(CONFIG_ECHO)           += echo/
 obj-$(CONFIG_VEXPRESS_SYSCFG)  += vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)         += cxl/
 obj-$(CONFIG_PANEL)             += panel.o
+
+lkdtm-$(CONFIG_LKDTM)          += lkdtm_core.o
+lkdtm-$(CONFIG_LKDTM)          += lkdtm_bugs.o
+lkdtm-$(CONFIG_LKDTM)          += lkdtm_heap.o
+lkdtm-$(CONFIG_LKDTM)          += lkdtm_perms.o
+lkdtm-$(CONFIG_LKDTM)          += lkdtm_rodata_objcopy.o
+lkdtm-$(CONFIG_LKDTM)          += lkdtm_usercopy.o
+
+OBJCOPYFLAGS :=
+OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
+                       --set-section-flags .text=alloc,readonly \
+                       --rename-section .text=.rodata
+$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o
+       $(call if_changed,objcopy)
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
deleted file mode 100644 (file)
index 0a5cbbe..0000000
+++ /dev/null
@@ -1,1023 +0,0 @@
-/*
- * Kprobe module for testing crash dumps
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author: Ankita Garg <ankita@in.ibm.com>
- *
- * This module induces system failures at predefined crashpoints to
- * evaluate the reliability of crash dumps obtained using different dumping
- * solutions.
- *
- * It is adapted from the Linux Kernel Dump Test Tool by
- * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
- *
- * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
- *
- * See Documentation/fault-injection/provoke-crashes.txt for instructions
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/module.h>
-#include <linux/buffer_head.h>
-#include <linux/kprobes.h>
-#include <linux/list.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/hrtimer.h>
-#include <linux/slab.h>
-#include <scsi/scsi_cmnd.h>
-#include <linux/debugfs.h>
-#include <linux/vmalloc.h>
-#include <linux/mman.h>
-#include <asm/cacheflush.h>
-
-#ifdef CONFIG_IDE
-#include <linux/ide.h>
-#endif
-
-/*
- * Make sure our attempts to over run the kernel stack doesn't trigger
- * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
- * recurse past the end of THREAD_SIZE by default.
- */
-#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
-#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
-#else
-#define REC_STACK_SIZE (THREAD_SIZE / 8)
-#endif
-#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
-
-#define DEFAULT_COUNT 10
-#define EXEC_SIZE 64
-
-enum cname {
-       CN_INVALID,
-       CN_INT_HARDWARE_ENTRY,
-       CN_INT_HW_IRQ_EN,
-       CN_INT_TASKLET_ENTRY,
-       CN_FS_DEVRW,
-       CN_MEM_SWAPOUT,
-       CN_TIMERADD,
-       CN_SCSI_DISPATCH_CMD,
-       CN_IDE_CORE_CP,
-       CN_DIRECT,
-};
-
-enum ctype {
-       CT_NONE,
-       CT_PANIC,
-       CT_BUG,
-       CT_WARNING,
-       CT_EXCEPTION,
-       CT_LOOP,
-       CT_OVERFLOW,
-       CT_CORRUPT_STACK,
-       CT_UNALIGNED_LOAD_STORE_WRITE,
-       CT_OVERWRITE_ALLOCATION,
-       CT_WRITE_AFTER_FREE,
-       CT_READ_AFTER_FREE,
-       CT_WRITE_BUDDY_AFTER_FREE,
-       CT_READ_BUDDY_AFTER_FREE,
-       CT_SOFTLOCKUP,
-       CT_HARDLOCKUP,
-       CT_SPINLOCKUP,
-       CT_HUNG_TASK,
-       CT_EXEC_DATA,
-       CT_EXEC_STACK,
-       CT_EXEC_KMALLOC,
-       CT_EXEC_VMALLOC,
-       CT_EXEC_USERSPACE,
-       CT_ACCESS_USERSPACE,
-       CT_WRITE_RO,
-       CT_WRITE_RO_AFTER_INIT,
-       CT_WRITE_KERN,
-       CT_WRAP_ATOMIC
-};
-
-static char* cp_name[] = {
-       "INT_HARDWARE_ENTRY",
-       "INT_HW_IRQ_EN",
-       "INT_TASKLET_ENTRY",
-       "FS_DEVRW",
-       "MEM_SWAPOUT",
-       "TIMERADD",
-       "SCSI_DISPATCH_CMD",
-       "IDE_CORE_CP",
-       "DIRECT",
-};
-
-static char* cp_type[] = {
-       "PANIC",
-       "BUG",
-       "WARNING",
-       "EXCEPTION",
-       "LOOP",
-       "OVERFLOW",
-       "CORRUPT_STACK",
-       "UNALIGNED_LOAD_STORE_WRITE",
-       "OVERWRITE_ALLOCATION",
-       "WRITE_AFTER_FREE",
-       "READ_AFTER_FREE",
-       "WRITE_BUDDY_AFTER_FREE",
-       "READ_BUDDY_AFTER_FREE",
-       "SOFTLOCKUP",
-       "HARDLOCKUP",
-       "SPINLOCKUP",
-       "HUNG_TASK",
-       "EXEC_DATA",
-       "EXEC_STACK",
-       "EXEC_KMALLOC",
-       "EXEC_VMALLOC",
-       "EXEC_USERSPACE",
-       "ACCESS_USERSPACE",
-       "WRITE_RO",
-       "WRITE_RO_AFTER_INIT",
-       "WRITE_KERN",
-       "WRAP_ATOMIC"
-};
-
-static struct jprobe lkdtm;
-
-static int lkdtm_parse_commandline(void);
-static void lkdtm_handler(void);
-
-static char* cpoint_name;
-static char* cpoint_type;
-static int cpoint_count = DEFAULT_COUNT;
-static int recur_count = REC_NUM_DEFAULT;
-
-static enum cname cpoint = CN_INVALID;
-static enum ctype cptype = CT_NONE;
-static int count = DEFAULT_COUNT;
-static DEFINE_SPINLOCK(count_lock);
-static DEFINE_SPINLOCK(lock_me_up);
-
-static u8 data_area[EXEC_SIZE];
-
-static const unsigned long rodata = 0xAA55AA55;
-static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
-
-module_param(recur_count, int, 0644);
-MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
-module_param(cpoint_name, charp, 0444);
-MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
-module_param(cpoint_type, charp, 0444);
-MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
-                               "hitting the crash point");
-module_param(cpoint_count, int, 0644);
-MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
-                               "crash point is to be hit to trigger action");
-
-static unsigned int jp_do_irq(unsigned int irq)
-{
-       lkdtm_handler();
-       jprobe_return();
-       return 0;
-}
-
-static irqreturn_t jp_handle_irq_event(unsigned int irq,
-                                      struct irqaction *action)
-{
-       lkdtm_handler();
-       jprobe_return();
-       return 0;
-}
-
-static void jp_tasklet_action(struct softirq_action *a)
-{
-       lkdtm_handler();
-       jprobe_return();
-}
-
-static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
-{
-       lkdtm_handler();
-       jprobe_return();
-}
-
-struct scan_control;
-
-static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
-                                            struct zone *zone,
-                                            struct scan_control *sc)
-{
-       lkdtm_handler();
-       jprobe_return();
-       return 0;
-}
-
-static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
-                           const enum hrtimer_mode mode)
-{
-       lkdtm_handler();
-       jprobe_return();
-       return 0;
-}
-
-static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
-{
-       lkdtm_handler();
-       jprobe_return();
-       return 0;
-}
-
-#ifdef CONFIG_IDE
-static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
-                       struct block_device *bdev, unsigned int cmd,
-                       unsigned long arg)
-{
-       lkdtm_handler();
-       jprobe_return();
-       return 0;
-}
-#endif
-
-/* Return the crashpoint number or NONE if the name is invalid */
-static enum ctype parse_cp_type(const char *what, size_t count)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
-               if (!strcmp(what, cp_type[i]))
-                       return i + 1;
-       }
-
-       return CT_NONE;
-}
-
-static const char *cp_type_to_str(enum ctype type)
-{
-       if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
-               return "None";
-
-       return cp_type[type - 1];
-}
-
-static const char *cp_name_to_str(enum cname name)
-{
-       if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
-               return "INVALID";
-
-       return cp_name[name - 1];
-}
-
-
-static int lkdtm_parse_commandline(void)
-{
-       int i;
-       unsigned long flags;
-
-       if (cpoint_count < 1 || recur_count < 1)
-               return -EINVAL;
-
-       spin_lock_irqsave(&count_lock, flags);
-       count = cpoint_count;
-       spin_unlock_irqrestore(&count_lock, flags);
-
-       /* No special parameters */
-       if (!cpoint_type && !cpoint_name)
-               return 0;
-
-       /* Neither or both of these need to be set */
-       if (!cpoint_type || !cpoint_name)
-               return -EINVAL;
-
-       cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
-       if (cptype == CT_NONE)
-               return -EINVAL;
-
-       for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
-               if (!strcmp(cpoint_name, cp_name[i])) {
-                       cpoint = i + 1;
-                       return 0;
-               }
-       }
-
-       /* Could not find a valid crash point */
-       return -EINVAL;
-}
-
-static int recursive_loop(int remaining)
-{
-       char buf[REC_STACK_SIZE];
-
-       /* Make sure compiler does not optimize this away. */
-       memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
-       if (!remaining)
-               return 0;
-       else
-               return recursive_loop(remaining - 1);
-}
-
-static void do_nothing(void)
-{
-       return;
-}
-
-/* Must immediately follow do_nothing for size calculuations to work out. */
-static void do_overwritten(void)
-{
-       pr_info("do_overwritten wasn't overwritten!\n");
-       return;
-}
-
-static noinline void corrupt_stack(void)
-{
-       /* Use default char array length that triggers stack protection. */
-       char data[8];
-
-       memset((void *)data, 0, 64);
-}
-
-static void noinline execute_location(void *dst)
-{
-       void (*func)(void) = dst;
-
-       pr_info("attempting ok execution at %p\n", do_nothing);
-       do_nothing();
-
-       memcpy(dst, do_nothing, EXEC_SIZE);
-       flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
-       pr_info("attempting bad execution at %p\n", func);
-       func();
-}
-
-static void execute_user_location(void *dst)
-{
-       /* Intentionally crossing kernel/user memory boundary. */
-       void (*func)(void) = dst;
-
-       pr_info("attempting ok execution at %p\n", do_nothing);
-       do_nothing();
-
-       if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
-               return;
-       flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
-       pr_info("attempting bad execution at %p\n", func);
-       func();
-}
-
-static void lkdtm_do_action(enum ctype which)
-{
-       switch (which) {
-       case CT_PANIC:
-               panic("dumptest");
-               break;
-       case CT_BUG:
-               BUG();
-               break;
-       case CT_WARNING:
-               WARN_ON(1);
-               break;
-       case CT_EXCEPTION:
-               *((int *) 0) = 0;
-               break;
-       case CT_LOOP:
-               for (;;)
-                       ;
-               break;
-       case CT_OVERFLOW:
-               (void) recursive_loop(recur_count);
-               break;
-       case CT_CORRUPT_STACK:
-               corrupt_stack();
-               break;
-       case CT_UNALIGNED_LOAD_STORE_WRITE: {
-               static u8 data[5] __attribute__((aligned(4))) = {1, 2,
-                               3, 4, 5};
-               u32 *p;
-               u32 val = 0x12345678;
-
-               p = (u32 *)(data + 1);
-               if (*p == 0)
-                       val = 0x87654321;
-               *p = val;
-                break;
-       }
-       case CT_OVERWRITE_ALLOCATION: {
-               size_t len = 1020;
-               u32 *data = kmalloc(len, GFP_KERNEL);
-
-               data[1024 / sizeof(u32)] = 0x12345678;
-               kfree(data);
-               break;
-       }
-       case CT_WRITE_AFTER_FREE: {
-               int *base, *again;
-               size_t len = 1024;
-               /*
-                * The slub allocator uses the first word to store the free
-                * pointer in some configurations. Use the middle of the
-                * allocation to avoid running into the freelist
-                */
-               size_t offset = (len / sizeof(*base)) / 2;
-
-               base = kmalloc(len, GFP_KERNEL);
-               pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
-               pr_info("Attempting bad write to freed memory at %p\n",
-                       &base[offset]);
-               kfree(base);
-               base[offset] = 0x0abcdef0;
-               /* Attempt to notice the overwrite. */
-               again = kmalloc(len, GFP_KERNEL);
-               kfree(again);
-               if (again != base)
-                       pr_info("Hmm, didn't get the same memory range.\n");
-
-               break;
-       }
-       case CT_READ_AFTER_FREE: {
-               int *base, *val, saw;
-               size_t len = 1024;
-               /*
-                * The slub allocator uses the first word to store the free
-                * pointer in some configurations. Use the middle of the
-                * allocation to avoid running into the freelist
-                */
-               size_t offset = (len / sizeof(*base)) / 2;
-
-               base = kmalloc(len, GFP_KERNEL);
-               if (!base)
-                       break;
-
-               val = kmalloc(len, GFP_KERNEL);
-               if (!val) {
-                       kfree(base);
-                       break;
-               }
-
-               *val = 0x12345678;
-               base[offset] = *val;
-               pr_info("Value in memory before free: %x\n", base[offset]);
-
-               kfree(base);
-
-               pr_info("Attempting bad read from freed memory\n");
-               saw = base[offset];
-               if (saw != *val) {
-                       /* Good! Poisoning happened, so declare a win. */
-                       pr_info("Memory correctly poisoned (%x)\n", saw);
-                       BUG();
-               }
-               pr_info("Memory was not poisoned\n");
-
-               kfree(val);
-               break;
-       }
-       case CT_WRITE_BUDDY_AFTER_FREE: {
-               unsigned long p = __get_free_page(GFP_KERNEL);
-               if (!p)
-                       break;
-               pr_info("Writing to the buddy page before free\n");
-               memset((void *)p, 0x3, PAGE_SIZE);
-               free_page(p);
-               schedule();
-               pr_info("Attempting bad write to the buddy page after free\n");
-               memset((void *)p, 0x78, PAGE_SIZE);
-               /* Attempt to notice the overwrite. */
-               p = __get_free_page(GFP_KERNEL);
-               free_page(p);
-               schedule();
-
-               break;
-       }
-       case CT_READ_BUDDY_AFTER_FREE: {
-               unsigned long p = __get_free_page(GFP_KERNEL);
-               int saw, *val;
-               int *base;
-
-               if (!p)
-                       break;
-
-               val = kmalloc(1024, GFP_KERNEL);
-               if (!val) {
-                       free_page(p);
-                       break;
-               }
-
-               base = (int *)p;
-
-               *val = 0x12345678;
-               base[0] = *val;
-               pr_info("Value in memory before free: %x\n", base[0]);
-               free_page(p);
-               pr_info("Attempting to read from freed memory\n");
-               saw = base[0];
-               if (saw != *val) {
-                       /* Good! Poisoning happened, so declare a win. */
-                       pr_info("Memory correctly poisoned (%x)\n", saw);
-                       BUG();
-               }
-               pr_info("Buddy page was not poisoned\n");
-
-               kfree(val);
-               break;
-       }
-       case CT_SOFTLOCKUP:
-               preempt_disable();
-               for (;;)
-                       cpu_relax();
-               break;
-       case CT_HARDLOCKUP:
-               local_irq_disable();
-               for (;;)
-                       cpu_relax();
-               break;
-       case CT_SPINLOCKUP:
-               /* Must be called twice to trigger. */
-               spin_lock(&lock_me_up);
-               /* Let sparse know we intended to exit holding the lock. */
-               __release(&lock_me_up);
-               break;
-       case CT_HUNG_TASK:
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule();
-               break;
-       case CT_EXEC_DATA:
-               execute_location(data_area);
-               break;
-       case CT_EXEC_STACK: {
-               u8 stack_area[EXEC_SIZE];
-               execute_location(stack_area);
-               break;
-       }
-       case CT_EXEC_KMALLOC: {
-               u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
-               execute_location(kmalloc_area);
-               kfree(kmalloc_area);
-               break;
-       }
-       case CT_EXEC_VMALLOC: {
-               u32 *vmalloc_area = vmalloc(EXEC_SIZE);
-               execute_location(vmalloc_area);
-               vfree(vmalloc_area);
-               break;
-       }
-       case CT_EXEC_USERSPACE: {
-               unsigned long user_addr;
-
-               user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
-                                   PROT_READ | PROT_WRITE | PROT_EXEC,
-                                   MAP_ANONYMOUS | MAP_PRIVATE, 0);
-               if (user_addr >= TASK_SIZE) {
-                       pr_warn("Failed to allocate user memory\n");
-                       return;
-               }
-               execute_user_location((void *)user_addr);
-               vm_munmap(user_addr, PAGE_SIZE);
-               break;
-       }
-       case CT_ACCESS_USERSPACE: {
-               unsigned long user_addr, tmp = 0;
-               unsigned long *ptr;
-
-               user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
-                                   PROT_READ | PROT_WRITE | PROT_EXEC,
-                                   MAP_ANONYMOUS | MAP_PRIVATE, 0);
-               if (user_addr >= TASK_SIZE) {
-                       pr_warn("Failed to allocate user memory\n");
-                       return;
-               }
-
-               if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
-                       pr_warn("copy_to_user failed\n");
-                       vm_munmap(user_addr, PAGE_SIZE);
-                       return;
-               }
-
-               ptr = (unsigned long *)user_addr;
-
-               pr_info("attempting bad read at %p\n", ptr);
-               tmp = *ptr;
-               tmp += 0xc0dec0de;
-
-               pr_info("attempting bad write at %p\n", ptr);
-               *ptr = tmp;
-
-               vm_munmap(user_addr, PAGE_SIZE);
-
-               break;
-       }
-       case CT_WRITE_RO: {
-               /* Explicitly cast away "const" for the test. */
-               unsigned long *ptr = (unsigned long *)&rodata;
-
-               pr_info("attempting bad rodata write at %p\n", ptr);
-               *ptr ^= 0xabcd1234;
-
-               break;
-       }
-       case CT_WRITE_RO_AFTER_INIT: {
-               unsigned long *ptr = &ro_after_init;
-
-               /*
-                * Verify we were written to during init. Since an Oops
-                * is considered a "success", a failure is to just skip the
-                * real test.
-                */
-               if ((*ptr & 0xAA) != 0xAA) {
-                       pr_info("%p was NOT written during init!?\n", ptr);
-                       break;
-               }
-
-               pr_info("attempting bad ro_after_init write at %p\n", ptr);
-               *ptr ^= 0xabcd1234;
-
-               break;
-       }
-       case CT_WRITE_KERN: {
-               size_t size;
-               unsigned char *ptr;
-
-               size = (unsigned long)do_overwritten -
-                      (unsigned long)do_nothing;
-               ptr = (unsigned char *)do_overwritten;
-
-               pr_info("attempting bad %zu byte write at %p\n", size, ptr);
-               memcpy(ptr, (unsigned char *)do_nothing, size);
-               flush_icache_range((unsigned long)ptr,
-                                  (unsigned long)(ptr + size));
-
-               do_overwritten();
-               break;
-       }
-       case CT_WRAP_ATOMIC: {
-               atomic_t under = ATOMIC_INIT(INT_MIN);
-               atomic_t over = ATOMIC_INIT(INT_MAX);
-
-               pr_info("attempting atomic underflow\n");
-               atomic_dec(&under);
-               pr_info("attempting atomic overflow\n");
-               atomic_inc(&over);
-
-               return;
-       }
-       case CT_NONE:
-       default:
-               break;
-       }
-
-}
-
-static void lkdtm_handler(void)
-{
-       unsigned long flags;
-       bool do_it = false;
-
-       spin_lock_irqsave(&count_lock, flags);
-       count--;
-       pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
-               cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
-
-       if (count == 0) {
-               do_it = true;
-               count = cpoint_count;
-       }
-       spin_unlock_irqrestore(&count_lock, flags);
-
-       if (do_it)
-               lkdtm_do_action(cptype);
-}
-
-static int lkdtm_register_cpoint(enum cname which)
-{
-       int ret;
-
-       cpoint = CN_INVALID;
-       if (lkdtm.entry != NULL)
-               unregister_jprobe(&lkdtm);
-
-       switch (which) {
-       case CN_DIRECT:
-               lkdtm_do_action(cptype);
-               return 0;
-       case CN_INT_HARDWARE_ENTRY:
-               lkdtm.kp.symbol_name = "do_IRQ";
-               lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
-               break;
-       case CN_INT_HW_IRQ_EN:
-               lkdtm.kp.symbol_name = "handle_IRQ_event";
-               lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
-               break;
-       case CN_INT_TASKLET_ENTRY:
-               lkdtm.kp.symbol_name = "tasklet_action";
-               lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
-               break;
-       case CN_FS_DEVRW:
-               lkdtm.kp.symbol_name = "ll_rw_block";
-               lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
-               break;
-       case CN_MEM_SWAPOUT:
-               lkdtm.kp.symbol_name = "shrink_inactive_list";
-               lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
-               break;
-       case CN_TIMERADD:
-               lkdtm.kp.symbol_name = "hrtimer_start";
-               lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
-               break;
-       case CN_SCSI_DISPATCH_CMD:
-               lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
-               lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
-               break;
-       case CN_IDE_CORE_CP:
-#ifdef CONFIG_IDE
-               lkdtm.kp.symbol_name = "generic_ide_ioctl";
-               lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
-#else
-               pr_info("Crash point not available\n");
-               return -EINVAL;
-#endif
-               break;
-       default:
-               pr_info("Invalid Crash Point\n");
-               return -EINVAL;
-       }
-
-       cpoint = which;
-       if ((ret = register_jprobe(&lkdtm)) < 0) {
-               pr_info("Couldn't register jprobe\n");
-               cpoint = CN_INVALID;
-       }
-
-       return ret;
-}
-
-static ssize_t do_register_entry(enum cname which, struct file *f,
-               const char __user *user_buf, size_t count, loff_t *off)
-{
-       char *buf;
-       int err;
-
-       if (count >= PAGE_SIZE)
-               return -EINVAL;
-
-       buf = (char *)__get_free_page(GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-       if (copy_from_user(buf, user_buf, count)) {
-               free_page((unsigned long) buf);
-               return -EFAULT;
-       }
-       /* NULL-terminate and remove enter */
-       buf[count] = '\0';
-       strim(buf);
-
-       cptype = parse_cp_type(buf, count);
-       free_page((unsigned long) buf);
-
-       if (cptype == CT_NONE)
-               return -EINVAL;
-
-       err = lkdtm_register_cpoint(which);
-       if (err < 0)
-               return err;
-
-       *off += count;
-
-       return count;
-}
-
-/* Generic read callback that just prints out the available crash types */
-static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
-               size_t count, loff_t *off)
-{
-       char *buf;
-       int i, n, out;
-
-       buf = (char *)__get_free_page(GFP_KERNEL);
-       if (buf == NULL)
-               return -ENOMEM;
-
-       n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
-       for (i = 0; i < ARRAY_SIZE(cp_type); i++)
-               n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
-       buf[n] = '\0';
-
-       out = simple_read_from_buffer(user_buf, count, off,
-                                     buf, n);
-       free_page((unsigned long) buf);
-
-       return out;
-}
-
-static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
-{
-       return 0;
-}
-
-
-static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
-               size_t count, loff_t *off)
-{
-       return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
-}
-
-static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
-               size_t count, loff_t *off)
-{
-       return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
-}
-
-static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
-               size_t count, loff_t *off)
-{
-       return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
-}
-
-static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
-               size_t count, loff_t *off)
-{
-       return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
-}
-
-static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
-               size_t count, loff_t *off)
-{
-       return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
-}
-
-static ssize_t timeradd_entry(struct file *f, const char __user *buf,
-               size_t count, loff_t *off)
-{
-       return do_register_entry(CN_TIMERADD, f, buf, count, off);
-}
-
-static ssize_t scsi_dispatch_cmd_entry(struct file *f,
-               const char __user *buf, size_t count, loff_t *off)
-{
-       return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
-}
-
-static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
-               size_t count, loff_t *off)
-{
-       return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
-}
-
-/* Special entry to just crash directly. Available without KPROBEs */
-static ssize_t direct_entry(struct file *f, const char __user *user_buf,
-               size_t count, loff_t *off)
-{
-       enum ctype type;
-       char *buf;
-
-       if (count >= PAGE_SIZE)
-               return -EINVAL;
-       if (count < 1)
-               return -EINVAL;
-
-       buf = (char *)__get_free_page(GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-       if (copy_from_user(buf, user_buf, count)) {
-               free_page((unsigned long) buf);
-               return -EFAULT;
-       }
-       /* NULL-terminate and remove enter */
-       buf[count] = '\0';
-       strim(buf);
-
-       type = parse_cp_type(buf, count);
-       free_page((unsigned long) buf);
-       if (type == CT_NONE)
-               return -EINVAL;
-
-       pr_info("Performing direct entry %s\n", cp_type_to_str(type));
-       lkdtm_do_action(type);
-       *off += count;
-
-       return count;
-}
-
-struct crash_entry {
-       const char *name;
-       const struct file_operations fops;
-};
-
-static const struct crash_entry crash_entries[] = {
-       {"DIRECT", {.read = lkdtm_debugfs_read,
-                       .llseek = generic_file_llseek,
-                       .open = lkdtm_debugfs_open,
-                       .write = direct_entry} },
-       {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
-                       .llseek = generic_file_llseek,
-                       .open = lkdtm_debugfs_open,
-                       .write = int_hardware_entry} },
-       {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
-                       .llseek = generic_file_llseek,
-                       .open = lkdtm_debugfs_open,
-                       .write = int_hw_irq_en} },
-       {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
-                       .llseek = generic_file_llseek,
-                       .open = lkdtm_debugfs_open,
-                       .write = int_tasklet_entry} },
-       {"FS_DEVRW", {.read = lkdtm_debugfs_read,
-                       .llseek = generic_file_llseek,
-                       .open = lkdtm_debugfs_open,
-                       .write = fs_devrw_entry} },
-       {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
-                       .llseek = generic_file_llseek,
-                       .open = lkdtm_debugfs_open,
-                       .write = mem_swapout_entry} },
-       {"TIMERADD", {.read = lkdtm_debugfs_read,
-                       .llseek = generic_file_llseek,
-                       .open = lkdtm_debugfs_open,
-                       .write = timeradd_entry} },
-       {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
-                       .llseek = generic_file_llseek,
-                       .open = lkdtm_debugfs_open,
-                       .write = scsi_dispatch_cmd_entry} },
-       {"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
-                       .llseek = generic_file_llseek,
-                       .open = lkdtm_debugfs_open,
-                       .write = ide_core_cp_entry} },
-};
-
-static struct dentry *lkdtm_debugfs_root;
-
-static int __init lkdtm_module_init(void)
-{
-       int ret = -EINVAL;
-       int n_debugfs_entries = 1; /* Assume only the direct entry */
-       int i;
-
-       /* Make sure we can write to __ro_after_init values during __init */
-       ro_after_init |= 0xAA;
-
-       /* Register debugfs interface */
-       lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
-       if (!lkdtm_debugfs_root) {
-               pr_err("creating root dir failed\n");
-               return -ENODEV;
-       }
-
-#ifdef CONFIG_KPROBES
-       n_debugfs_entries = ARRAY_SIZE(crash_entries);
-#endif
-
-       for (i = 0; i < n_debugfs_entries; i++) {
-               const struct crash_entry *cur = &crash_entries[i];
-               struct dentry *de;
-
-               de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
-                               NULL, &cur->fops);
-               if (de == NULL) {
-                       pr_err("could not create %s\n", cur->name);
-                       goto out_err;
-               }
-       }
-
-       if (lkdtm_parse_commandline() == -EINVAL) {
-               pr_info("Invalid command\n");
-               goto out_err;
-       }
-
-       if (cpoint != CN_INVALID && cptype != CT_NONE) {
-               ret = lkdtm_register_cpoint(cpoint);
-               if (ret < 0) {
-                       pr_info("Invalid crash point %d\n", cpoint);
-                       goto out_err;
-               }
-               pr_info("Crash point %s of type %s registered\n",
-                       cpoint_name, cpoint_type);
-       } else {
-               pr_info("No crash points registered, enable through debugfs\n");
-       }
-
-       return 0;
-
-out_err:
-       debugfs_remove_recursive(lkdtm_debugfs_root);
-       return ret;
-}
-
-static void __exit lkdtm_module_exit(void)
-{
-       debugfs_remove_recursive(lkdtm_debugfs_root);
-
-       unregister_jprobe(&lkdtm);
-       pr_info("Crash point unregistered\n");
-}
-
-module_init(lkdtm_module_init);
-module_exit(lkdtm_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Kprobe module for testing crash dumps");
diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h
new file mode 100644 (file)
index 0000000..fdf954c
--- /dev/null
@@ -0,0 +1,60 @@
+#ifndef __LKDTM_H
+#define __LKDTM_H
+
+#define pr_fmt(fmt) "lkdtm: " fmt
+
+#include <linux/kernel.h>
+
+/* lkdtm_bugs.c */
+void __init lkdtm_bugs_init(int *recur_param);
+void lkdtm_PANIC(void);
+void lkdtm_BUG(void);
+void lkdtm_WARNING(void);
+void lkdtm_EXCEPTION(void);
+void lkdtm_LOOP(void);
+void lkdtm_OVERFLOW(void);
+void lkdtm_CORRUPT_STACK(void);
+void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
+void lkdtm_SOFTLOCKUP(void);
+void lkdtm_HARDLOCKUP(void);
+void lkdtm_SPINLOCKUP(void);
+void lkdtm_HUNG_TASK(void);
+void lkdtm_ATOMIC_UNDERFLOW(void);
+void lkdtm_ATOMIC_OVERFLOW(void);
+
+/* lkdtm_heap.c */
+void lkdtm_OVERWRITE_ALLOCATION(void);
+void lkdtm_WRITE_AFTER_FREE(void);
+void lkdtm_READ_AFTER_FREE(void);
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
+void lkdtm_READ_BUDDY_AFTER_FREE(void);
+
+/* lkdtm_perms.c */
+void __init lkdtm_perms_init(void);
+void lkdtm_WRITE_RO(void);
+void lkdtm_WRITE_RO_AFTER_INIT(void);
+void lkdtm_WRITE_KERN(void);
+void lkdtm_EXEC_DATA(void);
+void lkdtm_EXEC_STACK(void);
+void lkdtm_EXEC_KMALLOC(void);
+void lkdtm_EXEC_VMALLOC(void);
+void lkdtm_EXEC_RODATA(void);
+void lkdtm_EXEC_USERSPACE(void);
+void lkdtm_ACCESS_USERSPACE(void);
+
+/* lkdtm_rodata.c */
+void lkdtm_rodata_do_nothing(void);
+
+/* lkdtm_usercopy.c */
+void __init lkdtm_usercopy_init(void);
+void __exit lkdtm_usercopy_exit(void);
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
+void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
+void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
+void lkdtm_USERCOPY_STACK_FRAME_TO(void);
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
+void lkdtm_USERCOPY_STACK_BEYOND(void);
+void lkdtm_USERCOPY_KERNEL(void);
+
+#endif
diff --git a/drivers/misc/lkdtm_bugs.c b/drivers/misc/lkdtm_bugs.c
new file mode 100644 (file)
index 0000000..182ae18
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * This is for all the tests related to logic bugs (e.g. bad dereferences,
+ * bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
+ * lockups) along with other things that don't fit well into existing LKDTM
+ * test source files.
+ */
+#include "lkdtm.h"
+#include <linux/sched.h>
+
+/*
+ * Make sure our attempts to over run the kernel stack doesn't trigger
+ * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
+ * recurse past the end of THREAD_SIZE by default.
+ */
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
+#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
+#else
+#define REC_STACK_SIZE (THREAD_SIZE / 8)
+#endif
+#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
+
+static int recur_count = REC_NUM_DEFAULT;
+
+static DEFINE_SPINLOCK(lock_me_up);
+
+static int recursive_loop(int remaining)
+{
+       char buf[REC_STACK_SIZE];
+
+       /* Make sure compiler does not optimize this away. */
+       memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
+       if (!remaining)
+               return 0;
+       else
+               return recursive_loop(remaining - 1);
+}
+
+/* If the depth is negative, use the default, otherwise keep parameter. */
+void __init lkdtm_bugs_init(int *recur_param)
+{
+       if (*recur_param < 0)
+               *recur_param = recur_count;
+       else
+               recur_count = *recur_param;
+}
+
+void lkdtm_PANIC(void)
+{
+       panic("dumptest");
+}
+
+void lkdtm_BUG(void)
+{
+       BUG();
+}
+
+void lkdtm_WARNING(void)
+{
+       WARN_ON(1);
+}
+
+void lkdtm_EXCEPTION(void)
+{
+       *((int *) 0) = 0;
+}
+
+void lkdtm_LOOP(void)
+{
+       for (;;)
+               ;
+}
+
+void lkdtm_OVERFLOW(void)
+{
+       (void) recursive_loop(recur_count);
+}
+
+noinline void lkdtm_CORRUPT_STACK(void)
+{
+       /* Use default char array length that triggers stack protection. */
+       char data[8];
+
+       memset((void *)data, 0, 64);
+}
+
+void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
+{
+       static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
+       u32 *p;
+       u32 val = 0x12345678;
+
+       p = (u32 *)(data + 1);
+       if (*p == 0)
+               val = 0x87654321;
+       *p = val;
+}
+
+void lkdtm_SOFTLOCKUP(void)
+{
+       preempt_disable();
+       for (;;)
+               cpu_relax();
+}
+
+void lkdtm_HARDLOCKUP(void)
+{
+       local_irq_disable();
+       for (;;)
+               cpu_relax();
+}
+
+void lkdtm_SPINLOCKUP(void)
+{
+       /* Must be called twice to trigger. */
+       spin_lock(&lock_me_up);
+       /* Let sparse know we intended to exit holding the lock. */
+       __release(&lock_me_up);
+}
+
+void lkdtm_HUNG_TASK(void)
+{
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       schedule();
+}
+
+void lkdtm_ATOMIC_UNDERFLOW(void)
+{
+       atomic_t under = ATOMIC_INIT(INT_MIN);
+
+       pr_info("attempting good atomic increment\n");
+       atomic_inc(&under);
+       atomic_dec(&under);
+
+       pr_info("attempting bad atomic underflow\n");
+       atomic_dec(&under);
+}
+
+void lkdtm_ATOMIC_OVERFLOW(void)
+{
+       atomic_t over = ATOMIC_INIT(INT_MAX);
+
+       pr_info("attempting good atomic decrement\n");
+       atomic_dec(&over);
+       atomic_inc(&over);
+
+       pr_info("attempting bad atomic overflow\n");
+       atomic_inc(&over);
+}
diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c
new file mode 100644 (file)
index 0000000..f9154b8
--- /dev/null
@@ -0,0 +1,544 @@
+/*
+ * Linux Kernel Dump Test Module for testing kernel crashes conditions:
+ * induces system failures at predefined crashpoints and under predefined
+ * operational conditions in order to evaluate the reliability of kernel
+ * sanity checking and crash dumps obtained using different dumping
+ * solutions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Ankita Garg <ankita@in.ibm.com>
+ *
+ * It is adapted from the Linux Kernel Dump Test Tool by
+ * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
+ *
+ * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
+ *
+ * See Documentation/fault-injection/provoke-crashes.txt for instructions
+ */
+#include "lkdtm.h"
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/kprobes.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/slab.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/debugfs.h>
+
+#ifdef CONFIG_IDE
+#include <linux/ide.h>
+#endif
+
+#define DEFAULT_COUNT 10
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+               size_t count, loff_t *off);
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+                           size_t count, loff_t *off);
+
+#ifdef CONFIG_KPROBES
+static void lkdtm_handler(void);
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+                                  const char __user *user_buf,
+                                  size_t count, loff_t *off);
+
+
+/* jprobe entry point handlers. */
+static unsigned int jp_do_irq(unsigned int irq)
+{
+       lkdtm_handler();
+       jprobe_return();
+       return 0;
+}
+
+static irqreturn_t jp_handle_irq_event(unsigned int irq,
+                                      struct irqaction *action)
+{
+       lkdtm_handler();
+       jprobe_return();
+       return 0;
+}
+
+static void jp_tasklet_action(struct softirq_action *a)
+{
+       lkdtm_handler();
+       jprobe_return();
+}
+
+static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+{
+       lkdtm_handler();
+       jprobe_return();
+}
+
+struct scan_control;
+
+static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
+                                            struct zone *zone,
+                                            struct scan_control *sc)
+{
+       lkdtm_handler();
+       jprobe_return();
+       return 0;
+}
+
+static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
+                           const enum hrtimer_mode mode)
+{
+       lkdtm_handler();
+       jprobe_return();
+       return 0;
+}
+
+static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
+{
+       lkdtm_handler();
+       jprobe_return();
+       return 0;
+}
+
+# ifdef CONFIG_IDE
+static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
+                       struct block_device *bdev, unsigned int cmd,
+                       unsigned long arg)
+{
+       lkdtm_handler();
+       jprobe_return();
+       return 0;
+}
+# endif
+#endif
+
+/* Crash points */
+struct crashpoint {
+       const char *name;
+       const struct file_operations fops;
+       struct jprobe jprobe;
+};
+
+#define CRASHPOINT(_name, _write, _symbol, _entry)             \
+       {                                                       \
+               .name = _name,                                  \
+               .fops = {                                       \
+                       .read   = lkdtm_debugfs_read,           \
+                       .llseek = generic_file_llseek,          \
+                       .open   = lkdtm_debugfs_open,           \
+                       .write  = _write,                       \
+               },                                              \
+               .jprobe = {                                     \
+                       .kp.symbol_name = _symbol,              \
+                       .entry = (kprobe_opcode_t *)_entry,     \
+               },                                              \
+       }
+
+/* Define the possible places where we can trigger a crash point. */
+struct crashpoint crashpoints[] = {
+       CRASHPOINT("DIRECT",                    direct_entry,
+                  NULL,                        NULL),
+#ifdef CONFIG_KPROBES
+       CRASHPOINT("INT_HARDWARE_ENTRY",        lkdtm_debugfs_entry,
+                  "do_IRQ",                    jp_do_irq),
+       CRASHPOINT("INT_HW_IRQ_EN",             lkdtm_debugfs_entry,
+                  "handle_IRQ_event",          jp_handle_irq_event),
+       CRASHPOINT("INT_TASKLET_ENTRY",         lkdtm_debugfs_entry,
+                  "tasklet_action",            jp_tasklet_action),
+       CRASHPOINT("FS_DEVRW",                  lkdtm_debugfs_entry,
+                  "ll_rw_block",               jp_ll_rw_block),
+       CRASHPOINT("MEM_SWAPOUT",               lkdtm_debugfs_entry,
+                  "shrink_inactive_list",      jp_shrink_inactive_list),
+       CRASHPOINT("TIMERADD",                  lkdtm_debugfs_entry,
+                  "hrtimer_start",             jp_hrtimer_start),
+       CRASHPOINT("SCSI_DISPATCH_CMD",         lkdtm_debugfs_entry,
+                  "scsi_dispatch_cmd",         jp_scsi_dispatch_cmd),
+# ifdef CONFIG_IDE
+       CRASHPOINT("IDE_CORE_CP",               lkdtm_debugfs_entry,
+                  "generic_ide_ioctl",         jp_generic_ide_ioctl),
+# endif
+#endif
+};
+
+
+/* Crash types. */
+struct crashtype {
+       const char *name;
+       void (*func)(void);
+};
+
+#define CRASHTYPE(_name)                       \
+       {                                       \
+               .name = __stringify(_name),     \
+               .func = lkdtm_ ## _name,        \
+       }
+
+/* Define the possible types of crashes that can be triggered. */
+struct crashtype crashtypes[] = {
+       CRASHTYPE(PANIC),
+       CRASHTYPE(BUG),
+       CRASHTYPE(WARNING),
+       CRASHTYPE(EXCEPTION),
+       CRASHTYPE(LOOP),
+       CRASHTYPE(OVERFLOW),
+       CRASHTYPE(CORRUPT_STACK),
+       CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
+       CRASHTYPE(OVERWRITE_ALLOCATION),
+       CRASHTYPE(WRITE_AFTER_FREE),
+       CRASHTYPE(READ_AFTER_FREE),
+       CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
+       CRASHTYPE(READ_BUDDY_AFTER_FREE),
+       CRASHTYPE(SOFTLOCKUP),
+       CRASHTYPE(HARDLOCKUP),
+       CRASHTYPE(SPINLOCKUP),
+       CRASHTYPE(HUNG_TASK),
+       CRASHTYPE(EXEC_DATA),
+       CRASHTYPE(EXEC_STACK),
+       CRASHTYPE(EXEC_KMALLOC),
+       CRASHTYPE(EXEC_VMALLOC),
+       CRASHTYPE(EXEC_RODATA),
+       CRASHTYPE(EXEC_USERSPACE),
+       CRASHTYPE(ACCESS_USERSPACE),
+       CRASHTYPE(WRITE_RO),
+       CRASHTYPE(WRITE_RO_AFTER_INIT),
+       CRASHTYPE(WRITE_KERN),
+       CRASHTYPE(ATOMIC_UNDERFLOW),
+       CRASHTYPE(ATOMIC_OVERFLOW),
+       CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
+       CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
+       CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
+       CRASHTYPE(USERCOPY_HEAP_FLAG_FROM),
+       CRASHTYPE(USERCOPY_STACK_FRAME_TO),
+       CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
+       CRASHTYPE(USERCOPY_STACK_BEYOND),
+       CRASHTYPE(USERCOPY_KERNEL),
+};
+
+
+/* Global jprobe entry and crashtype. */
+static struct jprobe *lkdtm_jprobe;
+struct crashpoint *lkdtm_crashpoint;
+struct crashtype *lkdtm_crashtype;
+
+/* Module parameters */
+static int recur_count = -1;
+module_param(recur_count, int, 0644);
+MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
+
+static char* cpoint_name;
+module_param(cpoint_name, charp, 0444);
+MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
+
+static char* cpoint_type;
+module_param(cpoint_type, charp, 0444);
+MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
+                               "hitting the crash point");
+
+static int cpoint_count = DEFAULT_COUNT;
+module_param(cpoint_count, int, 0644);
+MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
+                               "crash point is to be hit to trigger action");
+
+
+/* Return the crashtype matching @name, or NULL if the name is unknown. */
+static struct crashtype *find_crashtype(const char *name)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+               if (!strcmp(name, crashtypes[i].name))
+                       return &crashtypes[i];
+       }
+
+       return NULL;
+}
+
+/*
+ * Invoke the handler of the given crashtype.  BUGs immediately if the
+ * crashtype or its handler is missing.
+ *
+ * This is forced noinline just so it distinctly shows up in the stackdump
+ * which makes validation of expected lkdtm crashes easier.
+ */
+static noinline void lkdtm_do_action(struct crashtype *crashtype)
+{
+       BUG_ON(!crashtype || !crashtype->func);
+       crashtype->func();
+}
+
+/*
+ * Arm @crashtype at @crashpoint.  A crashpoint without a probed symbol
+ * (the DIRECT entry) is triggered immediately instead of registering a
+ * jprobe.  Any previously armed jprobe is unregistered first.
+ *
+ * Returns 0 on success or the negative error from register_jprobe().
+ */
+static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
+                                struct crashtype *crashtype)
+{
+       int ret;
+
+       /* If this doesn't have a symbol, just call immediately. */
+       if (!crashpoint->jprobe.kp.symbol_name) {
+               lkdtm_do_action(crashtype);
+               return 0;
+       }
+
+       /* Only one jprobe may be armed at a time; drop the old one. */
+       if (lkdtm_jprobe != NULL)
+               unregister_jprobe(lkdtm_jprobe);
+
+       lkdtm_crashpoint = crashpoint;
+       lkdtm_crashtype = crashtype;
+       lkdtm_jprobe = &crashpoint->jprobe;
+       ret = register_jprobe(lkdtm_jprobe);
+       if (ret < 0) {
+               pr_info("Couldn't register jprobe %s\n",
+                       crashpoint->jprobe.kp.symbol_name);
+               /* Roll the globals back so nothing points at a dead probe. */
+               lkdtm_jprobe = NULL;
+               lkdtm_crashpoint = NULL;
+               lkdtm_crashtype = NULL;
+       }
+
+       return ret;
+}
+
+#ifdef CONFIG_KPROBES
+/* Global crash counter and spinlock. */
+static int crash_count = DEFAULT_COUNT;
+static DEFINE_SPINLOCK(crash_count_lock);
+
+/*
+ * Called by jprobe entry points: decrements the shared crash counter
+ * under crash_count_lock and, once it reaches zero, re-arms the counter
+ * from cpoint_count and fires the configured crashtype.  The action is
+ * run outside the lock.
+ */
+static void lkdtm_handler(void)
+{
+       unsigned long flags;
+       bool do_it = false;
+
+       BUG_ON(!lkdtm_crashpoint || !lkdtm_crashtype);
+
+       spin_lock_irqsave(&crash_count_lock, flags);
+       crash_count--;
+       pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
+               lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count);
+
+       if (crash_count == 0) {
+               do_it = true;
+               crash_count = cpoint_count;
+       }
+       spin_unlock_irqrestore(&crash_count_lock, flags);
+
+       if (do_it)
+               lkdtm_do_action(lkdtm_crashtype);
+}
+
+/*
+ * debugfs write handler for a crashpoint file: parses a crashtype name
+ * from userspace and registers it at the crashpoint stashed in the
+ * inode's i_private.  Returns @count on success or a negative errno.
+ */
+static ssize_t lkdtm_debugfs_entry(struct file *f,
+                                  const char __user *user_buf,
+                                  size_t count, loff_t *off)
+{
+       struct crashpoint *crashpoint = file_inode(f)->i_private;
+       struct crashtype *crashtype = NULL;
+       char *buf;
+       int err;
+
+       /* Leave room for the terminating NUL within the single page. */
+       if (count >= PAGE_SIZE)
+               return -EINVAL;
+
+       buf = (char *)__get_free_page(GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       if (copy_from_user(buf, user_buf, count)) {
+               free_page((unsigned long) buf);
+               return -EFAULT;
+       }
+       /* NUL-terminate and strip surrounding whitespace (trailing newline). */
+       buf[count] = '\0';
+       strim(buf);
+
+       crashtype = find_crashtype(buf);
+       free_page((unsigned long)buf);
+
+       if (!crashtype)
+               return -EINVAL;
+
+       err = lkdtm_register_cpoint(crashpoint, crashtype);
+       if (err < 0)
+               return err;
+
+       *off += count;
+
+       return count;
+}
+#endif
+
+/*
+ * Generic read callback that just prints out the available crash types,
+ * one per line, from a temporary page-sized buffer.
+ *
+ * NOTE(review): snprintf() returns the would-be length, so if the list
+ * ever outgrew PAGE_SIZE, n could exceed PAGE_SIZE and the buf[n] write
+ * below would go out of bounds; scnprintf() would be safer -- confirm
+ * the list stays comfortably under one page.
+ */
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+               size_t count, loff_t *off)
+{
+       char *buf;
+       int i, n, out;
+
+       buf = (char *)__get_free_page(GFP_KERNEL);
+       if (buf == NULL)
+               return -ENOMEM;
+
+       n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
+       for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
+               n += snprintf(buf + n, PAGE_SIZE - n, "%s\n",
+                             crashtypes[i].name);
+       }
+       buf[n] = '\0';
+
+       out = simple_read_from_buffer(user_buf, count, off,
+                                     buf, n);
+       free_page((unsigned long) buf);
+
+       return out;
+}
+
+/*
+ * debugfs open: nothing to set up; per-file state is reached through the
+ * inode's i_private by the read/write handlers.
+ */
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
+{
+       return 0;
+}
+
+/*
+ * Special entry to just crash directly. Available without KPROBEs.
+ * Parses a crashtype name from userspace and fires it immediately;
+ * returns @count on success or a negative errno.
+ */
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+               size_t count, loff_t *off)
+{
+       struct crashtype *crashtype;
+       char *buf;
+
+       /* Need at least one byte and room for the terminating NUL. */
+       if (count >= PAGE_SIZE)
+               return -EINVAL;
+       if (count < 1)
+               return -EINVAL;
+
+       buf = (char *)__get_free_page(GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       if (copy_from_user(buf, user_buf, count)) {
+               free_page((unsigned long) buf);
+               return -EFAULT;
+       }
+       /* NUL-terminate and strip surrounding whitespace (trailing newline). */
+       buf[count] = '\0';
+       strim(buf);
+
+       crashtype = find_crashtype(buf);
+       free_page((unsigned long) buf);
+       if (!crashtype)
+               return -EINVAL;
+
+       pr_info("Performing direct entry %s\n", crashtype->name);
+       /* Many crashtypes will not return from here. */
+       lkdtm_do_action(crashtype);
+       *off += count;
+
+       return count;
+}
+
+/* Root of the "provoke-crash" debugfs directory. */
+static struct dentry *lkdtm_debugfs_root;
+
+/*
+ * Module init: validates the cpoint_name/cpoint_type parameters, runs
+ * the per-test init hooks, creates one debugfs trigger file per
+ * crashpoint under "provoke-crash", and optionally arms the crashpoint
+ * selected on the command line.
+ */
+static int __init lkdtm_module_init(void)
+{
+       struct crashpoint *crashpoint = NULL;
+       struct crashtype *crashtype = NULL;
+       int ret = -EINVAL;
+       int i;
+
+       /* Neither or both of these need to be set */
+       if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) {
+               pr_err("Need both cpoint_type and cpoint_name or neither\n");
+               return -EINVAL;
+       }
+
+       if (cpoint_type) {
+               crashtype = find_crashtype(cpoint_type);
+               if (!crashtype) {
+                       pr_err("Unknown crashtype '%s'\n", cpoint_type);
+                       return -EINVAL;
+               }
+       }
+
+       if (cpoint_name) {
+               /* Linear scan; last match wins (names are expected unique). */
+               for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+                       if (!strcmp(cpoint_name, crashpoints[i].name))
+                               crashpoint = &crashpoints[i];
+               }
+
+               /* Refuse unknown crashpoints. */
+               if (!crashpoint) {
+                       pr_err("Invalid crashpoint %s\n", cpoint_name);
+                       return -EINVAL;
+               }
+       }
+
+#ifdef CONFIG_KPROBES
+       /* Set crash count. */
+       crash_count = cpoint_count;
+#endif
+
+       /* Handle test-specific initialization. */
+       lkdtm_bugs_init(&recur_count);
+       lkdtm_perms_init();
+       lkdtm_usercopy_init();
+
+       /* Register debugfs interface */
+       lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
+       if (!lkdtm_debugfs_root) {
+               pr_err("creating root dir failed\n");
+               return -ENODEV;
+       }
+
+       /* Install debugfs trigger files. */
+       for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
+               struct crashpoint *cur = &crashpoints[i];
+               struct dentry *de;
+
+               de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
+                                        cur, &cur->fops);
+               if (de == NULL) {
+                       pr_err("could not create crashpoint %s\n", cur->name);
+                       goto out_err;
+               }
+       }
+
+       /* Install crashpoint if one was selected. */
+       if (crashpoint) {
+               ret = lkdtm_register_cpoint(crashpoint, crashtype);
+               if (ret < 0) {
+                       pr_info("Invalid crashpoint %s\n", crashpoint->name);
+                       goto out_err;
+               }
+               pr_info("Crash point %s of type %s registered\n",
+                       crashpoint->name, cpoint_type);
+       } else {
+               pr_info("No crash points registered, enable through debugfs\n");
+       }
+
+       return 0;
+
+out_err:
+       /* Tears down the root dir and every trigger file created above. */
+       debugfs_remove_recursive(lkdtm_debugfs_root);
+       return ret;
+}
+
+/*
+ * Module exit: remove the debugfs tree, run per-test cleanup, and drop
+ * the armed jprobe.
+ *
+ * NOTE(review): unregister_jprobe() is called unconditionally even
+ * though lkdtm_jprobe may still be NULL when nothing was ever armed --
+ * confirm unregister_jprobe(NULL) is tolerated here.
+ */
+static void __exit lkdtm_module_exit(void)
+{
+       debugfs_remove_recursive(lkdtm_debugfs_root);
+
+       /* Handle test-specific clean-up. */
+       lkdtm_usercopy_exit();
+
+       unregister_jprobe(lkdtm_jprobe);
+       pr_info("Crash point unregistered\n");
+}
+
+module_init(lkdtm_module_init);
+module_exit(lkdtm_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kernel crash testing module");
diff --git a/drivers/misc/lkdtm_heap.c b/drivers/misc/lkdtm_heap.c
new file mode 100644 (file)
index 0000000..0f15816
--- /dev/null
@@ -0,0 +1,142 @@
+/*
+ * This is for all the tests relating directly to heap memory, including
+ * page allocation and slab allocations.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+
+/*
+ * This tries to stay within the next largest power-of-2 kmalloc cache
+ * to avoid actually overwriting anything important if it's not detected
+ * correctly: the 1020-byte request lands in the 1024-byte cache and the
+ * write at byte offset 1024 is one element past the requested length.
+ *
+ * NOTE(review): the kmalloc() result is used unchecked; on allocation
+ * failure this would oops on a NULL write rather than exercising the
+ * intended overflow -- presumably acceptable for a crash-test module,
+ * confirm.
+ */
+void lkdtm_OVERWRITE_ALLOCATION(void)
+{
+       size_t len = 1020;
+       u32 *data = kmalloc(len, GFP_KERNEL);
+
+       data[1024 / sizeof(u32)] = 0x12345678;
+       kfree(data);
+}
+
+/*
+ * Deliberate use-after-free write into a kmalloc allocation, followed by
+ * a re-allocation of the same size to give the allocator a chance to
+ * notice the corruption.
+ *
+ * NOTE(review): the kmalloc() result is used unchecked -- presumably
+ * acceptable for a crash-test module, confirm.
+ */
+void lkdtm_WRITE_AFTER_FREE(void)
+{
+       int *base, *again;
+       size_t len = 1024;
+       /*
+        * The slub allocator uses the first word to store the free
+        * pointer in some configurations. Use the middle of the
+        * allocation to avoid running into the freelist
+        */
+       size_t offset = (len / sizeof(*base)) / 2;
+
+       base = kmalloc(len, GFP_KERNEL);
+       pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
+       pr_info("Attempting bad write to freed memory at %p\n",
+               &base[offset]);
+       kfree(base);
+       base[offset] = 0x0abcdef0;
+       /* Attempt to notice the overwrite. */
+       again = kmalloc(len, GFP_KERNEL);
+       kfree(again);
+       if (again != base)
+               pr_info("Hmm, didn't get the same memory range.\n");
+}
+
+/*
+ * Deliberate use-after-free read from a kmalloc allocation.  A known
+ * value is written before the free; if the read-back differs, freed
+ * memory was poisoned and the test BUGs to signal success.
+ */
+void lkdtm_READ_AFTER_FREE(void)
+{
+       int *base, *val, saw;
+       size_t len = 1024;
+       /*
+        * The slub allocator uses the first word to store the free
+        * pointer in some configurations. Use the middle of the
+        * allocation to avoid running into the freelist
+        */
+       size_t offset = (len / sizeof(*base)) / 2;
+
+       base = kmalloc(len, GFP_KERNEL);
+       if (!base) {
+               pr_info("Unable to allocate base memory.\n");
+               return;
+       }
+
+       val = kmalloc(len, GFP_KERNEL);
+       if (!val) {
+               pr_info("Unable to allocate val memory.\n");
+               kfree(base);
+               return;
+       }
+
+       *val = 0x12345678;
+       base[offset] = *val;
+       pr_info("Value in memory before free: %x\n", base[offset]);
+
+       kfree(base);
+
+       pr_info("Attempting bad read from freed memory\n");
+       saw = base[offset];
+       if (saw != *val) {
+               /* Good! Poisoning happened, so declare a win. */
+               pr_info("Memory correctly poisoned (%x)\n", saw);
+               BUG();
+       }
+       pr_info("Memory was not poisoned\n");
+
+       kfree(val);
+}
+
+/*
+ * Deliberate use-after-free write to a page returned to the buddy
+ * allocator.  schedule() is called after each free to give the
+ * allocator/poisoning a chance to act on the freed page.
+ */
+void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
+{
+       unsigned long p = __get_free_page(GFP_KERNEL);
+       if (!p) {
+               pr_info("Unable to allocate free page\n");
+               return;
+       }
+
+       pr_info("Writing to the buddy page before free\n");
+       memset((void *)p, 0x3, PAGE_SIZE);
+       free_page(p);
+       schedule();
+       pr_info("Attempting bad write to the buddy page after free\n");
+       memset((void *)p, 0x78, PAGE_SIZE);
+       /* Attempt to notice the overwrite. */
+       p = __get_free_page(GFP_KERNEL);
+       free_page(p);
+       schedule();
+}
+
+/*
+ * Deliberate use-after-free read from a buddy page.  A known value is
+ * written before the free; if the read-back differs, the freed page was
+ * poisoned and the test BUGs to signal success.
+ */
+void lkdtm_READ_BUDDY_AFTER_FREE(void)
+{
+       unsigned long p = __get_free_page(GFP_KERNEL);
+       int saw, *val;
+       int *base;
+
+       if (!p) {
+               pr_info("Unable to allocate free page\n");
+               return;
+       }
+
+       val = kmalloc(1024, GFP_KERNEL);
+       if (!val) {
+               pr_info("Unable to allocate val memory.\n");
+               free_page(p);
+               return;
+       }
+
+       base = (int *)p;
+
+       *val = 0x12345678;
+       base[0] = *val;
+       pr_info("Value in memory before free: %x\n", base[0]);
+       free_page(p);
+       pr_info("Attempting to read from freed memory\n");
+       saw = base[0];
+       if (saw != *val) {
+               /* Good! Poisoning happened, so declare a win. */
+               pr_info("Memory correctly poisoned (%x)\n", saw);
+               BUG();
+       }
+       pr_info("Buddy page was not poisoned\n");
+
+       kfree(val);
+}
diff --git a/drivers/misc/lkdtm_perms.c b/drivers/misc/lkdtm_perms.c
new file mode 100644 (file)
index 0000000..45f1c0f
--- /dev/null
@@ -0,0 +1,199 @@
+/*
+ * This is for all the tests related to validating kernel memory
+ * permissions: non-executable regions, non-writable regions, and
+ * even non-readable regions.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+/* Whether or not to fill the target memory area with do_nothing(). */
+#define CODE_WRITE     true
+#define CODE_AS_IS     false
+
+/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
+#define EXEC_SIZE 64
+
+/* This is non-const, so it will end up in the .data section. */
+static u8 data_area[EXEC_SIZE];
+
+/* This is const, so it will end up in the .rodata section. */
+static const unsigned long rodata = 0xAA55AA55;
+
+/* This is marked __ro_after_init, so it should ultimately be .rodata. */
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
+
+/*
+ * This just returns to the caller. It is designed to be copied into
+ * non-executable memory regions: execute_location() memcpy()s
+ * EXEC_SIZE bytes starting at this function's address.
+ */
+static void do_nothing(void)
+{
+       return;
+}
+
+/* Must immediately follow do_nothing for size calculations to work out. */
+/*
+ * Target of WRITE_KERN: its size is computed as the distance from
+ * do_nothing, so its placement directly after do_nothing matters.
+ * If the overwrite is blocked, this message proves the function ran.
+ */
+static void do_overwritten(void)
+{
+       pr_info("do_overwritten wasn't overwritten!\n");
+       return;
+}
+
+/*
+ * Attempt to execute code at @dst.  With @write == CODE_WRITE, EXEC_SIZE
+ * bytes of do_nothing() are first copied there (with an icache flush);
+ * with CODE_AS_IS the destination is executed unmodified.  A known-good
+ * call to do_nothing() runs first as a control.
+ */
+static noinline void execute_location(void *dst, bool write)
+{
+       void (*func)(void) = dst;
+
+       pr_info("attempting ok execution at %p\n", do_nothing);
+       do_nothing();
+
+       if (write == CODE_WRITE) {
+               memcpy(dst, do_nothing, EXEC_SIZE);
+               flush_icache_range((unsigned long)dst,
+                                  (unsigned long)dst + EXEC_SIZE);
+       }
+       pr_info("attempting bad execution at %p\n", func);
+       func();
+}
+
+/*
+ * Like execute_location(), but @dst is a userspace address: the code is
+ * copied with copy_to_user() and then called from kernel context, which
+ * SMEP/PAN-style protections should forbid.
+ */
+static void execute_user_location(void *dst)
+{
+       /* Intentionally crossing kernel/user memory boundary. */
+       void (*func)(void) = dst;
+
+       pr_info("attempting ok execution at %p\n", do_nothing);
+       do_nothing();
+
+       /* Silently bail if the copy to userspace fails. */
+       if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
+               return;
+       flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
+       pr_info("attempting bad execution at %p\n", func);
+       func();
+}
+
+/* Attempt a write to a .rodata object; should fault if rodata is RO. */
+void lkdtm_WRITE_RO(void)
+{
+       /* Explicitly cast away "const" for the test. */
+       unsigned long *ptr = (unsigned long *)&rodata;
+
+       pr_info("attempting bad rodata write at %p\n", ptr);
+       *ptr ^= 0xabcd1234;
+}
+
+/*
+ * Attempt a write to a __ro_after_init object, which should have been
+ * made read-only once init finished.
+ */
+void lkdtm_WRITE_RO_AFTER_INIT(void)
+{
+       unsigned long *ptr = &ro_after_init;
+
+       /*
+        * Verify we were written to during init. Since an Oops
+        * is considered a "success", a failure is to just skip the
+        * real test.
+        */
+       if ((*ptr & 0xAA) != 0xAA) {
+               pr_info("%p was NOT written during init!?\n", ptr);
+               return;
+       }
+
+       pr_info("attempting bad ro_after_init write at %p\n", ptr);
+       *ptr ^= 0xabcd1234;
+}
+
+/*
+ * Attempt to overwrite kernel text: copies do_nothing() over
+ * do_overwritten() (size taken as the distance between the two), then
+ * calls do_overwritten() -- if the write was blocked, its message
+ * still prints.
+ */
+void lkdtm_WRITE_KERN(void)
+{
+       size_t size;
+       unsigned char *ptr;
+
+       size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
+       ptr = (unsigned char *)do_overwritten;
+
+       pr_info("attempting bad %zu byte write at %p\n", size, ptr);
+       memcpy(ptr, (unsigned char *)do_nothing, size);
+       flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
+
+       do_overwritten();
+}
+
+/* Attempt execution from the .data section. */
+void lkdtm_EXEC_DATA(void)
+{
+       execute_location(data_area, CODE_WRITE);
+}
+
+/* Attempt execution from the stack. */
+void lkdtm_EXEC_STACK(void)
+{
+       u8 stack_area[EXEC_SIZE];
+       execute_location(stack_area, CODE_WRITE);
+}
+
+/*
+ * Attempt execution from a kmalloc allocation.
+ * NOTE(review): allocation result is unchecked before use -- presumably
+ * acceptable for a crash-test module, confirm.
+ */
+void lkdtm_EXEC_KMALLOC(void)
+{
+       u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
+       execute_location(kmalloc_area, CODE_WRITE);
+       kfree(kmalloc_area);
+}
+
+/*
+ * Attempt execution from a vmalloc allocation.
+ * NOTE(review): allocation result is unchecked before use, as above.
+ */
+void lkdtm_EXEC_VMALLOC(void)
+{
+       u32 *vmalloc_area = vmalloc(EXEC_SIZE);
+       execute_location(vmalloc_area, CODE_WRITE);
+       vfree(vmalloc_area);
+}
+
+/* Attempt execution of the do-nothing stub placed in .rodata. */
+void lkdtm_EXEC_RODATA(void)
+{
+       execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS);
+}
+
+/* Attempt execution of code placed in an anonymous userspace mapping. */
+void lkdtm_EXEC_USERSPACE(void)
+{
+       unsigned long user_addr;
+
+       user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+                           PROT_READ | PROT_WRITE | PROT_EXEC,
+                           MAP_ANONYMOUS | MAP_PRIVATE, 0);
+       /* vm_mmap() returns an errno-encoded value above TASK_SIZE on failure. */
+       if (user_addr >= TASK_SIZE) {
+               pr_warn("Failed to allocate user memory\n");
+               return;
+       }
+       execute_user_location((void *)user_addr);
+       vm_munmap(user_addr, PAGE_SIZE);
+}
+
+/*
+ * Attempt direct (non-copy_*_user) kernel reads and writes of a
+ * userspace mapping, which PAN/SMAP-style protections should forbid.
+ */
+void lkdtm_ACCESS_USERSPACE(void)
+{
+       unsigned long user_addr, tmp = 0;
+       unsigned long *ptr;
+
+       user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+                           PROT_READ | PROT_WRITE | PROT_EXEC,
+                           MAP_ANONYMOUS | MAP_PRIVATE, 0);
+       if (user_addr >= TASK_SIZE) {
+               pr_warn("Failed to allocate user memory\n");
+               return;
+       }
+
+       /* Fault the page in legitimately first. */
+       if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
+               pr_warn("copy_to_user failed\n");
+               vm_munmap(user_addr, PAGE_SIZE);
+               return;
+       }
+
+       ptr = (unsigned long *)user_addr;
+
+       pr_info("attempting bad read at %p\n", ptr);
+       tmp = *ptr;
+       tmp += 0xc0dec0de;
+
+       pr_info("attempting bad write at %p\n", ptr);
+       *ptr = tmp;
+
+       vm_munmap(user_addr, PAGE_SIZE);
+}
+
+/*
+ * Seed the __ro_after_init variable while it is still writable, so
+ * WRITE_RO_AFTER_INIT can later verify init-time writes succeeded.
+ */
+void __init lkdtm_perms_init(void)
+{
+       /* Make sure we can write to __ro_after_init values during __init */
+       ro_after_init |= 0xAA;
+
+}
diff --git a/drivers/misc/lkdtm_rodata.c b/drivers/misc/lkdtm_rodata.c
new file mode 100644 (file)
index 0000000..166b1db
--- /dev/null
@@ -0,0 +1,10 @@
+/*
+ * This includes functions that are meant to live entirely in .rodata
+ * (via objcopy tricks), to validate the non-executability of .rodata.
+ */
+#include "lkdtm.h"
+
+/* Execution target for EXEC_RODATA; must contain only a plain return. */
+void lkdtm_rodata_do_nothing(void)
+{
+       /* Does nothing. We just want an architecture agnostic "return". */
+}
diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c
new file mode 100644 (file)
index 0000000..5a3fd76
--- /dev/null
@@ -0,0 +1,313 @@
+/*
+ * This is for all the tests related to copy_to_user() and copy_from_user()
+ * hardening.
+ */
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+static size_t cache_size = 1024;
+static struct kmem_cache *bad_cache;
+
+static const unsigned char test_text[] = "This is a test.\n";
+
+/*
+ * Instead of adding -Wno-return-local-addr, just pass the stack address
+ * through a function to obfuscate it from the compiler: noinline keeps
+ * the identity function from being seen through.
+ */
+static noinline unsigned char *trick_compiler(unsigned char *stack)
+{
+       return stack + 0;
+}
+
+/*
+ * Returns a pointer into its own (about-to-be-dead) stack frame, giving
+ * the caller a "bad" stack address outside its current frame for the
+ * frame-checking usercopy tests.
+ */
+static noinline unsigned char *do_usercopy_stack_callee(int value)
+{
+       unsigned char buf[32];
+       int i;
+
+       /* Exercise stack to avoid everything living in registers. */
+       for (i = 0; i < sizeof(buf); i++) {
+               buf[i] = value & 0xff;
+       }
+
+       return trick_compiler(buf);
+}
+
+/*
+ * Exercise stack-bounds checking of copy_to_user()/copy_from_user():
+ * first a good copy of a local buffer, then a bad copy using either a
+ * pointer into a dead callee frame (@bad_frame) or an address straddling
+ * the end of the thread stack.
+ *
+ * NOTE(review): bad_stack is passed to do_usercopy_stack_callee() while
+ * still uninitialized -- the value only seeds the callee's scratch
+ * buffer, but it is technically an uninitialized read; confirm this is
+ * intentional compiler-obfuscation.
+ */
+static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
+{
+       unsigned long user_addr;
+       unsigned char good_stack[32];
+       unsigned char *bad_stack;
+       int i;
+
+       /* Exercise stack to avoid everything living in registers. */
+       for (i = 0; i < sizeof(good_stack); i++)
+               good_stack[i] = test_text[i % sizeof(test_text)];
+
+       /* This is a pointer to outside our current stack frame. */
+       if (bad_frame) {
+               bad_stack = do_usercopy_stack_callee((uintptr_t)bad_stack);
+       } else {
+               /* Put start address just inside stack. */
+               bad_stack = task_stack_page(current) + THREAD_SIZE;
+               bad_stack -= sizeof(unsigned long);
+       }
+
+       user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+                           PROT_READ | PROT_WRITE | PROT_EXEC,
+                           MAP_ANONYMOUS | MAP_PRIVATE, 0);
+       if (user_addr >= TASK_SIZE) {
+               pr_warn("Failed to allocate user memory\n");
+               return;
+       }
+
+       if (to_user) {
+               pr_info("attempting good copy_to_user of local stack\n");
+               if (copy_to_user((void __user *)user_addr, good_stack,
+                                sizeof(good_stack))) {
+                       pr_warn("copy_to_user failed unexpectedly?!\n");
+                       goto free_user;
+               }
+
+               pr_info("attempting bad copy_to_user of distant stack\n");
+               if (copy_to_user((void __user *)user_addr, bad_stack,
+                                sizeof(good_stack))) {
+                       pr_warn("copy_to_user failed, but lacked Oops\n");
+                       goto free_user;
+               }
+       } else {
+               /*
+                * There isn't a safe way to not be protected by usercopy
+                * if we're going to write to another thread's stack.
+                */
+               if (!bad_frame)
+                       goto free_user;
+
+               pr_info("attempting good copy_from_user of local stack\n");
+               if (copy_from_user(good_stack, (void __user *)user_addr,
+                                  sizeof(good_stack))) {
+                       pr_warn("copy_from_user failed unexpectedly?!\n");
+                       goto free_user;
+               }
+
+               pr_info("attempting bad copy_from_user of distant stack\n");
+               if (copy_from_user(bad_stack, (void __user *)user_addr,
+                                  sizeof(good_stack))) {
+                       pr_warn("copy_from_user failed, but lacked Oops\n");
+                       goto free_user;
+               }
+       }
+
+free_user:
+       vm_munmap(user_addr, PAGE_SIZE);
+}
+
+/*
+ * Exercise heap-size checking of the usercopy hardening: a good copy of
+ * exactly the allocation size, then a bad copy of twice that size, in
+ * the direction selected by @to_user.
+ */
+static void do_usercopy_heap_size(bool to_user)
+{
+       unsigned long user_addr;
+       unsigned char *one, *two;
+       const size_t size = 1024;
+
+       one = kmalloc(size, GFP_KERNEL);
+       two = kmalloc(size, GFP_KERNEL);
+       if (!one || !two) {
+               pr_warn("Failed to allocate kernel memory\n");
+               goto free_kernel;
+       }
+
+       user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+                           PROT_READ | PROT_WRITE | PROT_EXEC,
+                           MAP_ANONYMOUS | MAP_PRIVATE, 0);
+       if (user_addr >= TASK_SIZE) {
+               pr_warn("Failed to allocate user memory\n");
+               goto free_kernel;
+       }
+
+       memset(one, 'A', size);
+       memset(two, 'B', size);
+
+       if (to_user) {
+               pr_info("attempting good copy_to_user of correct size\n");
+               if (copy_to_user((void __user *)user_addr, one, size)) {
+                       pr_warn("copy_to_user failed unexpectedly?!\n");
+                       goto free_user;
+               }
+
+               pr_info("attempting bad copy_to_user of too large size\n");
+               if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
+                       pr_warn("copy_to_user failed, but lacked Oops\n");
+                       goto free_user;
+               }
+       } else {
+               pr_info("attempting good copy_from_user of correct size\n");
+               if (copy_from_user(one, (void __user *)user_addr, size)) {
+                       pr_warn("copy_from_user failed unexpectedly?!\n");
+                       goto free_user;
+               }
+
+               pr_info("attempting bad copy_from_user of too large size\n");
+               if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
+                       pr_warn("copy_from_user failed, but lacked Oops\n");
+                       goto free_user;
+               }
+       }
+
+free_user:
+       vm_munmap(user_addr, PAGE_SIZE);
+free_kernel:
+       /* kfree(NULL) is a no-op, so partial-allocation failure is fine. */
+       kfree(one);
+       kfree(two);
+}
+
+/*
+ * Exercise SLAB_USERCOPY whitelisting: a good copy from a kmalloc cache
+ * (which carries SLAB_USERCOPY) and a bad copy from the module's
+ * "lkdtm-no-usercopy" cache created without it, in the direction
+ * selected by @to_user.
+ */
+static void do_usercopy_heap_flag(bool to_user)
+{
+       unsigned long user_addr;
+       unsigned char *good_buf = NULL;
+       unsigned char *bad_buf = NULL;
+
+       /* Make sure cache was prepared. */
+       if (!bad_cache) {
+               pr_warn("Failed to allocate kernel cache\n");
+               return;
+       }
+
+       /*
+        * Allocate one buffer from each cache (kmalloc will have the
+        * SLAB_USERCOPY flag already, but "bad_cache" won't).
+        */
+       good_buf = kmalloc(cache_size, GFP_KERNEL);
+       bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
+       if (!good_buf || !bad_buf) {
+               pr_warn("Failed to allocate buffers from caches\n");
+               goto free_alloc;
+       }
+
+       /* Allocate user memory we'll poke at. */
+       user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+                           PROT_READ | PROT_WRITE | PROT_EXEC,
+                           MAP_ANONYMOUS | MAP_PRIVATE, 0);
+       if (user_addr >= TASK_SIZE) {
+               pr_warn("Failed to allocate user memory\n");
+               goto free_alloc;
+       }
+
+       memset(good_buf, 'A', cache_size);
+       memset(bad_buf, 'B', cache_size);
+
+       if (to_user) {
+               pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
+               if (copy_to_user((void __user *)user_addr, good_buf,
+                                cache_size)) {
+                       pr_warn("copy_to_user failed unexpectedly?!\n");
+                       goto free_user;
+               }
+
+               pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
+               if (copy_to_user((void __user *)user_addr, bad_buf,
+                                cache_size)) {
+                       pr_warn("copy_to_user failed, but lacked Oops\n");
+                       goto free_user;
+               }
+       } else {
+               pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
+               if (copy_from_user(good_buf, (void __user *)user_addr,
+                                  cache_size)) {
+                       pr_warn("copy_from_user failed unexpectedly?!\n");
+                       goto free_user;
+               }
+
+               pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
+               if (copy_from_user(bad_buf, (void __user *)user_addr,
+                                  cache_size)) {
+                       pr_warn("copy_from_user failed, but lacked Oops\n");
+                       goto free_user;
+               }
+       }
+
+free_user:
+       vm_munmap(user_addr, PAGE_SIZE);
+free_alloc:
+       if (bad_buf)
+               kmem_cache_free(bad_cache, bad_buf);
+       kfree(good_buf);
+}
+
+/*
+ * Callable tests: thin wrappers selecting direction (to/from user) and,
+ * for the stack tests, whether the bad pointer is a dead callee frame
+ * or an address just past the thread stack.
+ */
+void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
+{
+       do_usercopy_heap_size(true);
+}
+
+void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
+{
+       do_usercopy_heap_size(false);
+}
+
+void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
+{
+       do_usercopy_heap_flag(true);
+}
+
+void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
+{
+       do_usercopy_heap_flag(false);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_TO(void)
+{
+       do_usercopy_stack(true, true);
+}
+
+void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
+{
+       do_usercopy_stack(false, true);
+}
+
+void lkdtm_USERCOPY_STACK_BEYOND(void)
+{
+       do_usercopy_stack(true, false);
+}
+
+/*
+ * Exercise text-exposure checking: a good copy_to_user() from rodata
+ * (test_text), then a bad PAGE_SIZE copy starting at kernel text
+ * (vm_mmap's address), which the hardening should reject.
+ */
+void lkdtm_USERCOPY_KERNEL(void)
+{
+       unsigned long user_addr;
+
+       user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
+                           PROT_READ | PROT_WRITE | PROT_EXEC,
+                           MAP_ANONYMOUS | MAP_PRIVATE, 0);
+       if (user_addr >= TASK_SIZE) {
+               pr_warn("Failed to allocate user memory\n");
+               return;
+       }
+
+       pr_info("attempting good copy_to_user from kernel rodata\n");
+       if (copy_to_user((void __user *)user_addr, test_text,
+                        sizeof(test_text))) {
+               pr_warn("copy_to_user failed unexpectedly?!\n");
+               goto free_user;
+       }
+
+       pr_info("attempting bad copy_to_user from kernel text\n");
+       if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
+               pr_warn("copy_to_user failed, but lacked Oops\n");
+               goto free_user;
+       }
+
+free_user:
+       vm_munmap(user_addr, PAGE_SIZE);
+}
+
+/*
+ * Create the cache used by the HEAP_FLAG tests: deliberately built
+ * without SLAB_USERCOPY so copies from it should be rejected.
+ */
+void __init lkdtm_usercopy_init(void)
+{
+       /* Prepare cache that lacks SLAB_USERCOPY flag. */
+       bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
+                                     0, NULL);
+}
+
+/* Tear down the test cache; kmem_cache_destroy(NULL) is a no-op. */
+void __exit lkdtm_usercopy_exit(void)
+{
+       kmem_cache_destroy(bad_cache);
+}
index 5aa606c8a827ae14df65de84f77d05ec4db252f2..085f3aafe6fa92f6ec749f7d5ed457edb1b31348 100644 (file)
@@ -132,6 +132,7 @@ static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
        hdr->length = length;
        hdr->msg_complete = 1;
        hdr->reserved = 0;
+       hdr->internal = 0;
 }
 
 /**
@@ -165,15 +166,15 @@ void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
  * Return: 0 on success, <0 on failure.
  */
 static inline
-int mei_hbm_cl_write(struct mei_device *dev,
-                    struct mei_cl *cl, u8 hbm_cmd, size_t len)
+int mei_hbm_cl_write(struct mei_device *dev, struct mei_cl *cl,
+                    u8 hbm_cmd, u8 *buf, size_t len)
 {
-       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+       struct mei_msg_hdr mei_hdr;
 
-       mei_hbm_hdr(mei_hdr, len);
-       mei_hbm_cl_hdr(cl, hbm_cmd, dev->wr_msg.data, len);
+       mei_hbm_hdr(&mei_hdr, len);
+       mei_hbm_cl_hdr(cl, hbm_cmd, buf, len);
 
-       return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+       return mei_write_message(dev, &mei_hdr, buf);
 }
 
 /**
@@ -250,24 +251,23 @@ int mei_hbm_start_wait(struct mei_device *dev)
  */
 int mei_hbm_start_req(struct mei_device *dev)
 {
-       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
-       struct hbm_host_version_request *start_req;
+       struct mei_msg_hdr mei_hdr;
+       struct hbm_host_version_request start_req;
        const size_t len = sizeof(struct hbm_host_version_request);
        int ret;
 
        mei_hbm_reset(dev);
 
-       mei_hbm_hdr(mei_hdr, len);
+       mei_hbm_hdr(&mei_hdr, len);
 
        /* host start message */
-       start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
-       memset(start_req, 0, len);
-       start_req->hbm_cmd = HOST_START_REQ_CMD;
-       start_req->host_version.major_version = HBM_MAJOR_VERSION;
-       start_req->host_version.minor_version = HBM_MINOR_VERSION;
+       memset(&start_req, 0, len);
+       start_req.hbm_cmd = HOST_START_REQ_CMD;
+       start_req.host_version.major_version = HBM_MAJOR_VERSION;
+       start_req.host_version.minor_version = HBM_MINOR_VERSION;
 
        dev->hbm_state = MEI_HBM_IDLE;
-       ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+       ret = mei_write_message(dev, &mei_hdr, &start_req);
        if (ret) {
                dev_err(dev->dev, "version message write failed: ret = %d\n",
                        ret);
@@ -288,23 +288,22 @@ int mei_hbm_start_req(struct mei_device *dev)
  */
 static int mei_hbm_enum_clients_req(struct mei_device *dev)
 {
-       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
-       struct hbm_host_enum_request *enum_req;
+       struct mei_msg_hdr mei_hdr;
+       struct hbm_host_enum_request enum_req;
        const size_t len = sizeof(struct hbm_host_enum_request);
        int ret;
 
        /* enumerate clients */
-       mei_hbm_hdr(mei_hdr, len);
+       mei_hbm_hdr(&mei_hdr, len);
 
-       enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
-       memset(enum_req, 0, len);
-       enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
-       enum_req->flags |= dev->hbm_f_dc_supported ?
-                          MEI_HBM_ENUM_F_ALLOW_ADD : 0;
-       enum_req->flags |= dev->hbm_f_ie_supported ?
-                          MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
+       memset(&enum_req, 0, len);
+       enum_req.hbm_cmd = HOST_ENUM_REQ_CMD;
+       enum_req.flags |= dev->hbm_f_dc_supported ?
+                         MEI_HBM_ENUM_F_ALLOW_ADD : 0;
+       enum_req.flags |= dev->hbm_f_ie_supported ?
+                         MEI_HBM_ENUM_F_IMMEDIATE_ENUM : 0;
 
-       ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+       ret = mei_write_message(dev, &mei_hdr, &enum_req);
        if (ret) {
                dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
                        ret);
@@ -358,23 +357,21 @@ static int mei_hbm_me_cl_add(struct mei_device *dev,
  */
 static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
 {
-       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
-       struct hbm_add_client_response *resp;
+       struct mei_msg_hdr mei_hdr;
+       struct hbm_add_client_response resp;
        const size_t len = sizeof(struct hbm_add_client_response);
        int ret;
 
        dev_dbg(dev->dev, "adding client response\n");
 
-       resp = (struct hbm_add_client_response *)dev->wr_msg.data;
+       mei_hbm_hdr(&mei_hdr, len);
 
-       mei_hbm_hdr(mei_hdr, len);
-       memset(resp, 0, sizeof(struct hbm_add_client_response));
+       memset(&resp, 0, sizeof(struct hbm_add_client_response));
+       resp.hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
+       resp.me_addr = addr;
+       resp.status  = status;
 
-       resp->hbm_cmd = MEI_HBM_ADD_CLIENT_RES_CMD;
-       resp->me_addr = addr;
-       resp->status  = status;
-
-       ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+       ret = mei_write_message(dev, &mei_hdr, &resp);
        if (ret)
                dev_err(dev->dev, "add client response write failed: ret = %d\n",
                        ret);
@@ -421,18 +418,17 @@ int mei_hbm_cl_notify_req(struct mei_device *dev,
                          struct mei_cl *cl, u8 start)
 {
 
-       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
-       struct hbm_notification_request *req;
+       struct mei_msg_hdr mei_hdr;
+       struct hbm_notification_request req;
        const size_t len = sizeof(struct hbm_notification_request);
        int ret;
 
-       mei_hbm_hdr(mei_hdr, len);
-       mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, dev->wr_msg.data, len);
+       mei_hbm_hdr(&mei_hdr, len);
+       mei_hbm_cl_hdr(cl, MEI_HBM_NOTIFY_REQ_CMD, &req, len);
 
-       req = (struct hbm_notification_request *)dev->wr_msg.data;
-       req->start = start;
+       req.start = start;
 
-       ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+       ret = mei_write_message(dev, &mei_hdr, &req);
        if (ret)
                dev_err(dev->dev, "notify request failed: ret = %d\n", ret);
 
@@ -534,8 +530,8 @@ static void mei_hbm_cl_notify(struct mei_device *dev,
  */
 static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
 {
-       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
-       struct hbm_props_request *prop_req;
+       struct mei_msg_hdr mei_hdr;
+       struct hbm_props_request prop_req;
        const size_t len = sizeof(struct hbm_props_request);
        unsigned long addr;
        int ret;
@@ -550,15 +546,14 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
                return 0;
        }
 
-       mei_hbm_hdr(mei_hdr, len);
-       prop_req = (struct hbm_props_request *)dev->wr_msg.data;
+       mei_hbm_hdr(&mei_hdr, len);
 
-       memset(prop_req, 0, sizeof(struct hbm_props_request));
+       memset(&prop_req, 0, sizeof(struct hbm_props_request));
 
-       prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
-       prop_req->me_addr = addr;
+       prop_req.hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+       prop_req.me_addr = addr;
 
-       ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+       ret = mei_write_message(dev, &mei_hdr, &prop_req);
        if (ret) {
                dev_err(dev->dev, "properties request write failed: ret = %d\n",
                        ret);
@@ -581,21 +576,20 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
  */
 int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
 {
-       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
-       struct hbm_power_gate *req;
+       struct mei_msg_hdr mei_hdr;
+       struct hbm_power_gate req;
        const size_t len = sizeof(struct hbm_power_gate);
        int ret;
 
        if (!dev->hbm_f_pg_supported)
                return -EOPNOTSUPP;
 
-       mei_hbm_hdr(mei_hdr, len);
+       mei_hbm_hdr(&mei_hdr, len);
 
-       req = (struct hbm_power_gate *)dev->wr_msg.data;
-       memset(req, 0, len);
-       req->hbm_cmd = pg_cmd;
+       memset(&req, 0, len);
+       req.hbm_cmd = pg_cmd;
 
-       ret = mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+       ret = mei_write_message(dev, &mei_hdr, &req);
        if (ret)
                dev_err(dev->dev, "power gate command write failed.\n");
        return ret;
@@ -611,18 +605,17 @@ EXPORT_SYMBOL_GPL(mei_hbm_pg);
  */
 static int mei_hbm_stop_req(struct mei_device *dev)
 {
-       struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
-       struct hbm_host_stop_request *req =
-                       (struct hbm_host_stop_request *)dev->wr_msg.data;
+       struct mei_msg_hdr mei_hdr;
+       struct hbm_host_stop_request req;
        const size_t len = sizeof(struct hbm_host_stop_request);
 
-       mei_hbm_hdr(mei_hdr, len);
+       mei_hbm_hdr(&mei_hdr, len);
 
-       memset(req, 0, len);
-       req->hbm_cmd = HOST_STOP_REQ_CMD;
-       req->reason = DRIVER_STOP_REQUEST;
+       memset(&req, 0, len);
+       req.hbm_cmd = HOST_STOP_REQ_CMD;
+       req.reason = DRIVER_STOP_REQUEST;
 
-       return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+       return mei_write_message(dev, &mei_hdr, &req);
 }
 
 /**
@@ -636,9 +629,10 @@ static int mei_hbm_stop_req(struct mei_device *dev)
 int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
 {
        const size_t len = sizeof(struct hbm_flow_control);
+       u8 buf[len];
 
        cl_dbg(dev, cl, "sending flow control\n");
-       return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, len);
+       return mei_hbm_cl_write(dev, cl, MEI_FLOW_CONTROL_CMD, buf, len);
 }
 
 /**
@@ -714,8 +708,9 @@ static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
 int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
 {
        const size_t len = sizeof(struct hbm_client_connect_request);
+       u8 buf[len];
 
-       return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, len);
+       return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_REQ_CMD, buf, len);
 }
 
 /**
@@ -729,8 +724,9 @@ int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
 int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
 {
        const size_t len = sizeof(struct hbm_client_connect_response);
+       u8 buf[len];
 
-       return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, len);
+       return mei_hbm_cl_write(dev, cl, CLIENT_DISCONNECT_RES_CMD, buf, len);
 }
 
 /**
@@ -765,8 +761,9 @@ static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
 int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
 {
        const size_t len = sizeof(struct hbm_client_connect_request);
+       u8 buf[len];
 
-       return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, len);
+       return mei_hbm_cl_write(dev, cl, CLIENT_CONNECT_REQ_CMD, buf, len);
 }
 
 /**
index c9e01021eadf4e20098b6855b2c86a880a00c817..e5e32503d4bc44d9e03283f45910b271ba836a68 100644 (file)
@@ -382,7 +382,6 @@ const char *mei_pg_state_str(enum mei_pg_state state);
  *
  * @hbuf_depth  : depth of hardware host/write buffer is slots
  * @hbuf_is_ready : query if the host host/write buffer is ready
- * @wr_msg      : the buffer for hbm control messages
  *
  * @version     : HBM protocol version in use
  * @hbm_f_pg_supported  : hbm feature pgi protocol
@@ -467,12 +466,6 @@ struct mei_device {
        u8 hbuf_depth;
        bool hbuf_is_ready;
 
-       /* used for control messages */
-       struct {
-               struct mei_msg_hdr hdr;
-               unsigned char data[128];
-       } wr_msg;
-
        struct hbm_version version;
        unsigned int hbm_f_pg_supported:1;
        unsigned int hbm_f_dc_supported:1;
@@ -670,8 +663,7 @@ static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
 }
 
 static inline int mei_write_message(struct mei_device *dev,
-                       struct mei_msg_hdr *hdr,
-                       unsigned char *buf)
+                       struct mei_msg_hdr *hdr, void *buf)
 {
        return dev->ops->write(dev, hdr, buf);
 }
index e62fde3ac431c111ec945d4aba9c877f6308d81d..c5472e3c923126097fd93f2abf31402a1717c228 100644 (file)
@@ -355,8 +355,10 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
                goto idata_err;
        }
 
-       if (!idata->buf_bytes)
+       if (!idata->buf_bytes) {
+               idata->buf = NULL;
                return idata;
+       }
 
        idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
        if (!idata->buf) {
@@ -1786,8 +1788,8 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
 
        packed_cmd_hdr = packed->cmd_hdr;
        memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
-       packed_cmd_hdr[0] = (packed->nr_entries << 16) |
-               (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+       packed_cmd_hdr[0] = cpu_to_le32((packed->nr_entries << 16) |
+               (PACKED_CMD_WR << 8) | PACKED_CMD_VER);
        hdr_blocks = mmc_large_sector(card) ? 8 : 1;
 
        /*
@@ -1801,14 +1803,14 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
                        ((brq->data.blocks * brq->data.blksz) >=
                         card->ext_csd.data_tag_unit_size);
                /* Argument of CMD23 */
-               packed_cmd_hdr[(i * 2)] =
+               packed_cmd_hdr[(i * 2)] = cpu_to_le32(
                        (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
                        (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
-                       blk_rq_sectors(prq);
+                       blk_rq_sectors(prq));
                /* Argument of CMD18 or CMD25 */
-               packed_cmd_hdr[((i * 2)) + 1] =
+               packed_cmd_hdr[((i * 2)) + 1] = cpu_to_le32(
                        mmc_card_blockaddr(card) ?
-                       blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+                       blk_rq_pos(prq) : blk_rq_pos(prq) << 9);
                packed->blocks += blk_rq_sectors(prq);
                i++;
        }
index 03ddf0ecf4023c8ab09b9e516640626659eb1ae5..684087db170b218c45dfc0bb45e9479b7e61014f 100644 (file)
@@ -1068,8 +1068,6 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
        jz4740_mmc_clock_disable(host);
        setup_timer(&host->timeout_timer, jz4740_mmc_timeout,
                        (unsigned long)host);
-       /* It is not important when it times out, it just needs to timeout. */
-       set_timer_slack(&host->timeout_timer, HZ);
 
        host->use_dma = true;
        if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0)
index 86fac3e8683378acde59d1da87a0fad3878bf481..c763b404510f3a29d3864290297b22f596701cc3 100644 (file)
@@ -789,14 +789,16 @@ static int pxamci_probe(struct platform_device *pdev)
                gpio_direction_output(gpio_power,
                                      host->pdata->gpio_power_invert);
        }
-       if (gpio_is_valid(gpio_ro))
+       if (gpio_is_valid(gpio_ro)) {
                ret = mmc_gpio_request_ro(mmc, gpio_ro);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
-               goto out;
-       } else {
-               mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
-                       0 : MMC_CAP2_RO_ACTIVE_HIGH;
+               if (ret) {
+                       dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n",
+                               gpio_ro);
+                       goto out;
+               } else {
+                       mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
+                               0 : MMC_CAP2_RO_ACTIVE_HIGH;
+               }
        }
 
        if (gpio_is_valid(gpio_cd))
index 458ffb7637e5f902984d4727a3ada9a1251b5d56..008709c5cb09d907065984762fb497164fdec531 100644 (file)
@@ -43,6 +43,7 @@
 
 #ifdef CONFIG_X86
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/iosf_mbi.h>
 #endif
 
@@ -126,7 +127,7 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
 static bool sdhci_acpi_byt(void)
 {
        static const struct x86_cpu_id byt[] = {
-               { X86_VENDOR_INTEL, 6, 0x37 },
+               { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
                {}
        };
 
index 08e158895635cddda0bbea4e6973cfb8dd727e74..a136da8df6fe897d4f908d2723d4a95d8b152933 100644 (file)
@@ -1657,8 +1657,11 @@ static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
 
        /* detect availability of ELM module. Won't be present pre-OMAP4 */
        info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
-       if (!info->elm_of_node)
-               dev_dbg(dev, "ti,elm-id not in DT\n");
+       if (!info->elm_of_node) {
+               info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
+               if (!info->elm_of_node)
+                       dev_dbg(dev, "ti,elm-id not in DT\n");
+       }
 
        /* select ecc-scheme for NAND */
        if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
index ca81f46ea1aa9b64cd657171a3982c2dbc8b2741..edc70ffad6607ac06d0a40b48316bef554c5f4c2 100644 (file)
@@ -101,11 +101,14 @@ enum ad_link_speed_type {
 #define MAC_ADDRESS_EQUAL(A, B)        \
        ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
 
-static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
+static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+       0, 0, 0, 0, 0, 0
+};
 static u16 ad_ticks_per_sec;
 static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
 
-static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+       MULTICAST_LACPDU_ADDR;
 
 /* ================= main 802.3ad protocol functions ================== */
 static int ad_lacpdu_send(struct port *port);
@@ -1739,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
                aggregator->is_individual = false;
                aggregator->actor_admin_aggregator_key = 0;
                aggregator->actor_oper_aggregator_key = 0;
-               aggregator->partner_system = null_mac_addr;
+               eth_zero_addr(aggregator->partner_system.mac_addr_value);
                aggregator->partner_system_priority = 0;
                aggregator->partner_oper_aggregator_key = 0;
                aggregator->receive_state = 0;
@@ -1761,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
        if (aggregator) {
                ad_clear_agg(aggregator);
 
-               aggregator->aggregator_mac_address = null_mac_addr;
+               eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
                aggregator->aggregator_identifier = 0;
                aggregator->slave = NULL;
        }
index c5ac160a8ae954d1104084c3d94795725cc261ae..551f0f8dead3945cf1516aeff01afe6c74117072 100644 (file)
 
 
 
-#ifndef __long_aligned
-#define __long_aligned __attribute__((aligned((sizeof(long)))))
-#endif
-static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
 };
-static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
        0x33, 0x33, 0x00, 0x00, 0x00, 0x01
 };
 static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
index 941ec99cd3b69b6c9d3596e5b65b6aa89bc960c7..a2afa3be17a4bcc4e662a17e49f5c6bc31bce4d3 100644 (file)
@@ -1584,6 +1584,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        }
 
        /* check for initial state */
+       new_slave->link = BOND_LINK_NOCHANGE;
        if (bond->params.miimon) {
                if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
                        if (bond->params.updelay) {
index db760e84119fcb970b7b34f7c4fac92b1acfed52..b8df0f5e8c25ae35b1ce368b04536c40761a1c96 100644 (file)
@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
        if (err < 0)
                return err;
 
-       return register_netdevice(bond_dev);
+       err = register_netdevice(bond_dev);
+
+       netif_carrier_off(bond_dev);
+
+       return err;
 }
 
 static size_t bond_get_size(const struct net_device *bond_dev)
index 30defe6c81f22d6463c4df091a421014392f6ff2..821d86c38ab214a9c450f3079398b1d3a4af2e24 100644 (file)
@@ -3851,7 +3851,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
        unsigned long flags;
 
        /* If the device is closed, ignore the timeout */
-       if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+       if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
                return;
 
        /* Any nonrecoverable hardware error?
index 08a23e6b60e947894783f2115c2fe16abece84bc..1a3555d03a96a823e4936c06a2e9ce64d27ae851 100644 (file)
@@ -259,6 +259,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
                if (err) {
                        netdev_err(dev, "rx buffer allocation failed\n");
                        dev->stats.rx_dropped++;
+                       dev_kfree_skb(skb);
                        return;
                }
 
index 543bf38105c9240d9ae374708377755c5e4db9a6..bfa26a2590c979b41bc6c46aee275d730e1ce89d 100644 (file)
@@ -392,7 +392,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
                else
                        p = (char *)priv;
                p += s->stat_offset;
-               data[i] = *(u32 *)p;
+               data[i] = *(unsigned long *)p;
        }
 }
 
index a6333d38ecc02be43614e02d69da3910127b0e9f..25bbae5928d43a4d911d0d5852612d8524437cdb 100644 (file)
@@ -231,7 +231,7 @@ err_dma:
        dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
                         DMA_TO_DEVICE);
 
-       while (i > 0) {
+       while (i-- > 0) {
                int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
                struct bgmac_slot_info *slot = &ring->slots[index];
                u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
index a38cb047b54083897fa6e8df5c098c7e1b98d7ac..1b0ae4a72e9ecd81a1d404c5e6edadb3f00ce8cc 100644 (file)
@@ -1591,7 +1591,7 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
 {
        struct bnxt *bp = netdev_priv(dev);
        u16  start = eeprom->offset, length = eeprom->len;
-       int rc;
+       int rc = 0;
 
        memset(data, 0, eeprom->len);
 
index 8de79ae63231b0ad21e47edb9a150e026adf6bd3..0e7e7da8d201c0b3af122c542ef42467c03cb94c 100644 (file)
@@ -2821,7 +2821,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
                if (!g) {
                        netif_info(lio, tx_err, lio->netdev,
                                   "Transmit scatter gather: glist null!\n");
-                       goto lio_xmit_failed;
+                       goto lio_xmit_dma_failed;
                }
 
                cmdsetup.s.gather = 1;
@@ -2892,7 +2892,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
        else
                status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
        if (status == IQ_SEND_FAILED)
-               goto lio_xmit_failed;
+               goto lio_xmit_dma_failed;
 
        netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
 
@@ -2906,12 +2906,13 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        return NETDEV_TX_OK;
 
+lio_xmit_dma_failed:
+       dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+                        ndata.datasize, DMA_TO_DEVICE);
 lio_xmit_failed:
        stats->tx_dropped++;
        netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
                   iq_no, stats->tx_dropped);
-       dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
-                        ndata.datasize, DMA_TO_DEVICE);
        recv_buffer_free(skb);
        return NETDEV_TX_OK;
 }
index c4b262ca7d43623fba1cef20dfc6c03ebeadfd7f..2accab38632327ae007baf1ef293dca1ef7da7d9 100644 (file)
@@ -36,8 +36,8 @@
 #define __T4FW_VERSION_H__
 
 #define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
 #define T4FW_MIN_VERSION_MICRO 0x00
 
 #define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
 #define T5FW_MIN_VERSION_MICRO 0x00
 
 #define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
index 4edb98c3c6c70a9c620caa7928e8e5d555db1242..4466a11871109b41861cd1be9ff3dc70d207512f 100644 (file)
@@ -860,6 +860,11 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
        unsigned int entry;
        void *dest;
 
+       if (skb_put_padto(skb, ETHOC_ZLEN)) {
+               dev->stats.tx_errors++;
+               goto out_no_free;
+       }
+
        if (unlikely(skb->len > ETHOC_BUFSIZ)) {
                dev->stats.tx_errors++;
                goto out;
@@ -894,6 +899,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
        skb_tx_timestamp(skb);
 out:
        dev_kfree_skb(skb);
+out_no_free:
        return NETDEV_TX_OK;
 }
 
@@ -1086,7 +1092,7 @@ static int ethoc_probe(struct platform_device *pdev)
        if (!priv->iobase) {
                dev_err(&pdev->dev, "cannot remap I/O memory space\n");
                ret = -ENXIO;
-               goto error;
+               goto free;
        }
 
        if (netdev->mem_end) {
@@ -1095,7 +1101,7 @@ static int ethoc_probe(struct platform_device *pdev)
                if (!priv->membase) {
                        dev_err(&pdev->dev, "cannot remap memory space\n");
                        ret = -ENXIO;
-                       goto error;
+                       goto free;
                }
        } else {
                /* Allocate buffer memory */
@@ -1106,7 +1112,7 @@ static int ethoc_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
                                buffer_size);
                        ret = -ENOMEM;
-                       goto error;
+                       goto free;
                }
                netdev->mem_end = netdev->mem_start + buffer_size;
                priv->dma_alloc = buffer_size;
@@ -1120,7 +1126,7 @@ static int ethoc_probe(struct platform_device *pdev)
                128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
        if (num_bd < 4) {
                ret = -ENODEV;
-               goto error;
+               goto free;
        }
        priv->num_bd = num_bd;
        /* num_tx must be a power of two */
@@ -1133,7 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
        priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
        if (!priv->vma) {
                ret = -ENOMEM;
-               goto error;
+               goto free;
        }
 
        /* Allow the platform setup code to pass in a MAC address. */
index 06f031715b578c897863c906b07176589b862f07..9b7a3f5a2818f1fdc7dfe183991a2a7d3ab03523 100644 (file)
@@ -285,6 +285,7 @@ static void nps_enet_hw_reset(struct net_device *ndev)
        ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
        usleep_range(10, 20);
+       ge_rst_value = 0;
        nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
 
        /* Tx fifo reset sequence */
index ecdb6854a898738ad5d010c61f6f39021e8f0e51..88f3c85fb04ada575e367042d79f4b8188e12245 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/uaccess.h>
 #include <asm/firmware.h>
 #include <linux/seq_file.h>
+#include <linux/workqueue.h>
 
 #include "ibmvnic.h"
 
@@ -89,6 +90,7 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
 static int ibmvnic_remove(struct vio_dev *);
 static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
@@ -469,7 +471,8 @@ static int ibmvnic_open(struct net_device *netdev)
        crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
        ibmvnic_send_crq(adapter, &crq);
 
-       netif_start_queue(netdev);
+       netif_tx_start_all_queues(netdev);
+
        return 0;
 
 bounce_map_failed:
@@ -519,7 +522,7 @@ static int ibmvnic_close(struct net_device *netdev)
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_disable(&adapter->napi[i]);
 
-       netif_stop_queue(netdev);
+       netif_tx_stop_all_queues(netdev);
 
        if (adapter->bounce_buffer) {
                if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -1212,12 +1215,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
                goto reg_failed;
        }
 
-       scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
-       if (scrq->irq == NO_IRQ) {
-               dev_err(dev, "Error mapping irq\n");
-               goto map_irq_failed;
-       }
-
        scrq->adapter = adapter;
        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
        scrq->cur = 0;
@@ -1230,12 +1227,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
 
        return scrq;
 
-map_irq_failed:
-       do {
-               rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
-                                       adapter->vdev->unit_address,
-                                       scrq->crq_num);
-       } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 reg_failed:
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
@@ -1256,6 +1247,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
                        if (adapter->tx_scrq[i]) {
                                free_irq(adapter->tx_scrq[i]->irq,
                                         adapter->tx_scrq[i]);
+                               irq_dispose_mapping(adapter->tx_scrq[i]->irq);
                                release_sub_crq_queue(adapter,
                                                      adapter->tx_scrq[i]);
                        }
@@ -1267,6 +1259,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
                        if (adapter->rx_scrq[i]) {
                                free_irq(adapter->rx_scrq[i]->irq,
                                         adapter->rx_scrq[i]);
+                               irq_dispose_mapping(adapter->rx_scrq[i]->irq);
                                release_sub_crq_queue(adapter,
                                                      adapter->rx_scrq[i]);
                        }
@@ -1276,6 +1269,29 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
        adapter->requested_caps = 0;
 }
 
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+       int i;
+
+       if (adapter->tx_scrq) {
+               for (i = 0; i < adapter->req_tx_queues; i++)
+                       if (adapter->tx_scrq[i])
+                               release_sub_crq_queue(adapter,
+                                                     adapter->tx_scrq[i]);
+               adapter->tx_scrq = NULL;
+       }
+
+       if (adapter->rx_scrq) {
+               for (i = 0; i < adapter->req_rx_queues; i++)
+                       if (adapter->rx_scrq[i])
+                               release_sub_crq_queue(adapter,
+                                                     adapter->rx_scrq[i]);
+               adapter->rx_scrq = NULL;
+       }
+
+       adapter->requested_caps = 0;
+}
+
 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
                            struct ibmvnic_sub_crq_queue *scrq)
 {
@@ -1395,6 +1411,66 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
        return IRQ_HANDLED;
 }
 
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+       struct device *dev = &adapter->vdev->dev;
+       struct ibmvnic_sub_crq_queue *scrq;
+       int i = 0, j = 0;
+       int rc = 0;
+
+       for (i = 0; i < adapter->req_tx_queues; i++) {
+               scrq = adapter->tx_scrq[i];
+               scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+               if (scrq->irq == NO_IRQ) {
+                       rc = -EINVAL;
+                       dev_err(dev, "Error mapping irq\n");
+                       goto req_tx_irq_failed;
+               }
+
+               rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+                                0, "ibmvnic_tx", scrq);
+
+               if (rc) {
+                       dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+                               scrq->irq, rc);
+                       irq_dispose_mapping(scrq->irq);
+                       goto req_rx_irq_failed;
+               }
+       }
+
+       for (i = 0; i < adapter->req_rx_queues; i++) {
+               scrq = adapter->rx_scrq[i];
+               scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+               if (scrq->irq == NO_IRQ) {
+                       rc = -EINVAL;
+                       dev_err(dev, "Error mapping irq\n");
+                       goto req_rx_irq_failed;
+               }
+               rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+                                0, "ibmvnic_rx", scrq);
+               if (rc) {
+                       dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+                               scrq->irq, rc);
+                       irq_dispose_mapping(scrq->irq);
+                       goto req_rx_irq_failed;
+               }
+       }
+       return rc;
+
+req_rx_irq_failed:
+       for (j = 0; j < i; j++)
+               free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+               irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+       i = adapter->req_tx_queues;
+req_tx_irq_failed:
+       for (j = 0; j < i; j++)
+               free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+               irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+       release_sub_crqs_no_irqs(adapter);
+       return rc;
+}
+
 static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 {
        struct device *dev = &adapter->vdev->dev;
@@ -1403,8 +1479,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        union ibmvnic_crq crq;
        int total_queues;
        int more = 0;
-       int i, j;
-       int rc;
+       int i;
 
        if (!retry) {
                /* Sub-CRQ entries are 32 byte long */
@@ -1483,13 +1558,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        for (i = 0; i < adapter->req_tx_queues; i++) {
                adapter->tx_scrq[i] = allqueues[i];
                adapter->tx_scrq[i]->pool_index = i;
-               rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
-                                0, "ibmvnic_tx", adapter->tx_scrq[i]);
-               if (rc) {
-                       dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
-                               adapter->tx_scrq[i]->irq, rc);
-                       goto req_tx_irq_failed;
-               }
        }
 
        adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -1500,13 +1568,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
        for (i = 0; i < adapter->req_rx_queues; i++) {
                adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
                adapter->rx_scrq[i]->scrq_num = i;
-               rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
-                                0, "ibmvnic_rx", adapter->rx_scrq[i]);
-               if (rc) {
-                       dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
-                               adapter->rx_scrq[i]->irq, rc);
-                       goto req_rx_irq_failed;
-               }
        }
 
        memset(&crq, 0, sizeof(crq));
@@ -1559,15 +1620,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
 
        return;
 
-req_rx_irq_failed:
-       for (j = 0; j < i; j++)
-               free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
-       i = adapter->req_tx_queues;
-req_tx_irq_failed:
-       for (j = 0; j < i; j++)
-               free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
-       kfree(adapter->rx_scrq);
-       adapter->rx_scrq = NULL;
 rx_failed:
        kfree(adapter->tx_scrq);
        adapter->tx_scrq = NULL;
@@ -2348,9 +2400,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
                         *req_value,
                         (long int)be32_to_cpu(crq->request_capability_rsp.
                                               number), name);
-               release_sub_crqs(adapter);
+               release_sub_crqs_no_irqs(adapter);
                *req_value = be32_to_cpu(crq->request_capability_rsp.number);
-               complete(&adapter->init_done);
+               init_sub_crqs(adapter, 1);
                return;
        default:
                dev_err(dev, "Error %d in request cap rsp\n",
@@ -2659,7 +2711,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
 
 out:
        if (atomic_read(&adapter->running_cap_queries) == 0)
-               complete(&adapter->init_done);
+               init_sub_crqs(adapter, 0);
                /* We're done querying the capabilities, initialize sub-crqs */
 }
 
@@ -3202,8 +3254,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
                        dev_info(dev, "Partner initialized\n");
                        /* Send back a response */
                        rc = ibmvnic_send_crq_init_complete(adapter);
-                       if (rc == 0)
-                               send_version_xchg(adapter);
+                       if (!rc)
+                               schedule_work(&adapter->vnic_crq_init);
                        else
                                dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
                        break;
@@ -3555,8 +3607,63 @@ static const struct file_operations ibmvnic_dump_ops = {
        .release        = single_release,
 };
 
+/* Deferred (workqueue) half of the passive-init handshake.  Scheduled
+ * from the CRQ handler when the partner announces it has initialized:
+ * exchange versions, wait for capability negotiation to complete
+ * (renegotiating if asked), wire up the sub-CRQ irqs and register the
+ * net device.  Runs in process context and may sleep.
+ */
+static void handle_crq_init_rsp(struct work_struct *work)
+{
+       struct ibmvnic_adapter *adapter = container_of(work,
+                                                      struct ibmvnic_adapter,
+                                                      vnic_crq_init);
+       struct device *dev = &adapter->vdev->dev;
+       struct net_device *netdev = adapter->netdev;
+       unsigned long timeout = msecs_to_jiffies(30000);
+       int rc;
+
+       send_version_xchg(adapter);
+       reinit_completion(&adapter->init_done);
+       /* init_done is completed by the CRQ response path; bail out if
+        * the partner never answers within 30 seconds.
+        */
+       if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+               dev_err(dev, "Passive init timeout\n");
+               goto task_failed;
+       }
+
+       /* The capability-response handler may request renegotiation;
+        * keep re-querying until a pass completes without one.
+        */
+       do {
+               if (adapter->renegotiate) {
+                       adapter->renegotiate = false;
+                       release_sub_crqs_no_irqs(adapter);
+                       send_cap_queries(adapter);
+
+                       reinit_completion(&adapter->init_done);
+                       if (!wait_for_completion_timeout(&adapter->init_done,
+                                                        timeout)) {
+                               dev_err(dev, "Passive init timeout\n");
+                               goto task_failed;
+                       }
+               }
+       } while (adapter->renegotiate);
+       rc = init_sub_crq_irqs(adapter);
+
+       if (rc)
+               goto task_failed;
+
+       netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+       rc = register_netdev(netdev);
+       if (rc) {
+               dev_err(dev,
+                       "failed to register netdev rc=%d\n", rc);
+               goto register_failed;
+       }
+       dev_info(dev, "ibmvnic registered\n");
+
+       return;
+
+register_failed:
+       /* Undo init_sub_crq_irqs(): frees irqs and the sub-CRQs. */
+       release_sub_crqs(adapter);
+task_failed:
+       dev_err(dev, "Passive initialization was not successful\n");
+}
+
 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
+       unsigned long timeout = msecs_to_jiffies(30000);
        struct ibmvnic_adapter *adapter;
        struct net_device *netdev;
        unsigned char *mac_addr_p;
@@ -3593,6 +3700,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        netdev->ethtool_ops = &ibmvnic_ethtool_ops;
        SET_NETDEV_DEV(netdev, &dev->dev);
 
+       INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+
        spin_lock_init(&adapter->stats_lock);
 
        rc = ibmvnic_init_crq_queue(adapter);
@@ -3635,30 +3744,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        ibmvnic_send_crq_init(adapter);
 
        init_completion(&adapter->init_done);
-       wait_for_completion(&adapter->init_done);
+       if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+               return 0;
 
        do {
-               adapter->renegotiate = false;
-
-               init_sub_crqs(adapter, 0);
-               reinit_completion(&adapter->init_done);
-               wait_for_completion(&adapter->init_done);
-
                if (adapter->renegotiate) {
-                       release_sub_crqs(adapter);
+                       adapter->renegotiate = false;
+                       release_sub_crqs_no_irqs(adapter);
                        send_cap_queries(adapter);
 
                        reinit_completion(&adapter->init_done);
-                       wait_for_completion(&adapter->init_done);
+                       if (!wait_for_completion_timeout(&adapter->init_done,
+                                                        timeout))
+                               return 0;
                }
        } while (adapter->renegotiate);
 
-       /* if init_sub_crqs is partially successful, retry */
-       while (!adapter->tx_scrq || !adapter->rx_scrq) {
-               init_sub_crqs(adapter, 1);
-
-               reinit_completion(&adapter->init_done);
-               wait_for_completion(&adapter->init_done);
+       rc = init_sub_crq_irqs(adapter);
+       if (rc) {
+               dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+               goto free_debugfs;
        }
 
        netdev->real_num_tx_queues = adapter->req_tx_queues;
@@ -3666,12 +3771,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
        rc = register_netdev(netdev);
        if (rc) {
                dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
-               goto free_debugfs;
+               goto free_sub_crqs;
        }
        dev_info(&dev->dev, "ibmvnic registered\n");
 
        return 0;
 
+free_sub_crqs:
+       release_sub_crqs(adapter);
 free_debugfs:
        if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
                debugfs_remove_recursive(adapter->debugfs_dir);
index 0b66a506a4e44e4d36aae861c8ba6289318cd8e1..e82898fd518ef890afbbf79c307833b8127e782c 100644 (file)
@@ -1045,4 +1045,6 @@ struct ibmvnic_adapter {
        u64 opt_rxba_entries_per_subcrq;
        __be64 tx_rx_desc_req;
        u8 map_id;
+
+       struct work_struct vnic_crq_init;
 };
index 73f745205a1c30a834ee6a7ab2de240a6b0fb640..2b2e2f8c636994219e997bdb53ff8d3379275bb9 100644 (file)
@@ -154,16 +154,6 @@ void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
        writel(val, hw->hw_addr + reg);
 }
 
-static bool e1000e_vlan_used(struct e1000_adapter *adapter)
-{
-       u16 vid;
-
-       for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
-               return true;
-
-       return false;
-}
-
 /**
  * e1000_regdump - register printout routine
  * @hw: pointer to the HW structure
@@ -3453,8 +3443,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
 
        ew32(RCTL, rctl);
 
-       if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX ||
-           e1000e_vlan_used(adapter))
+       if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                e1000e_vlan_strip_enable(adapter);
        else
                e1000e_vlan_strip_disable(adapter);
@@ -6926,6 +6915,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
        if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
                features &= ~NETIF_F_RXFCS;
 
+       /* Since there is no support for separate Rx/Tx vlan accel
+        * enable/disable make sure Tx flag is always in same state as Rx.
+        */
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               features |= NETIF_F_HW_VLAN_CTAG_TX;
+       else
+               features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
        return features;
 }
 
index 5ea22008d721d0a0ad769f32b7ee74ef8b19249d..501f15d9f4d6eef599733f12e7052fedc1563186 100644 (file)
@@ -1344,6 +1344,13 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
        if (!vsi || !macaddr)
                return NULL;
 
+       /* Do not allow broadcast filter to be added since broadcast filter
+        * is added as part of add VSI for any newly created VSI except
+        * FDIR VSI
+        */
+       if (is_broadcast_ether_addr(macaddr))
+               return NULL;
+
        f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
@@ -2151,18 +2158,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
                                         aq_ret, pf->hw.aq.asq_last_status);
                        }
                }
-               aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
-                                                  vsi->seid,
-                                                  cur_promisc, NULL);
-               if (aq_ret) {
-                       retval = i40e_aq_rc_to_posix(aq_ret,
-                                                    pf->hw.aq.asq_last_status);
-                       dev_info(&pf->pdev->dev,
-                                "set brdcast promisc failed, err %s, aq_err %s\n",
-                                i40e_stat_str(&pf->hw, aq_ret),
-                                i40e_aq_str(&pf->hw,
-                                            pf->hw.aq.asq_last_status));
-               }
        }
 out:
        /* if something went wrong then set the changed flag so we try again */
@@ -7726,10 +7721,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
  * @vsi: the VSI being configured
  * @v_idx: index of the vector in the vsi struct
+ * @cpu: cpu to be used on affinity_mask
  *
  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
  **/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
 {
        struct i40e_q_vector *q_vector;
 
@@ -7740,7 +7736,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 
        q_vector->vsi = vsi;
        q_vector->v_idx = v_idx;
-       cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+       cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
        if (vsi->netdev)
                netif_napi_add(vsi->netdev, &q_vector->napi,
                               i40e_napi_poll, NAPI_POLL_WEIGHT);
@@ -7764,8 +7761,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
-       int v_idx, num_q_vectors;
-       int err;
+       int err, v_idx, num_q_vectors, current_cpu;
 
        /* if not MSIX, give the one vector only to the LAN VSI */
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -7775,10 +7771,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
        else
                return -EINVAL;
 
+       current_cpu = cpumask_first(cpu_online_mask);
+
        for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
-               err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+               err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
                if (err)
                        goto err_out;
+               current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+               if (unlikely(current_cpu >= nr_cpu_ids))
+                       current_cpu = cpumask_first(cpu_online_mask);
        }
 
        return 0;
@@ -9224,6 +9225,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
 static int i40e_add_vsi(struct i40e_vsi *vsi)
 {
        int ret = -ENODEV;
+       i40e_status aq_ret = 0;
        u8 laa_macaddr[ETH_ALEN];
        bool found_laa_mac_filter = false;
        struct i40e_pf *pf = vsi->back;
@@ -9413,6 +9415,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                vsi->seid = ctxt.seid;
                vsi->id = ctxt.vsi_number;
        }
+       /* Except the FDIR VSI, set the broadcast filter for all other VSIs */
+       if (vsi->type != I40E_VSI_FDIR) {
+               aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+               if (aq_ret) {
+                       ret = i40e_aq_rc_to_posix(aq_ret,
+                                                 hw->aq.asq_last_status);
+                       dev_info(&pf->pdev->dev,
+                                "set brdcast promisc failed, err %s, aq_err %s\n",
+                                i40e_stat_str(hw, aq_ret),
+                                i40e_aq_str(hw, hw->aq.asq_last_status));
+               }
+       }
 
        spin_lock_bh(&vsi->mac_filter_list_lock);
        /* If macvlan filters already exist, force them to get loaded */
index 55f151fca1dcb785089511105920755fef9397b1..a8868e1bf832557ffa7c68d2840a0b098fdf874d 100644 (file)
@@ -1280,8 +1280,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    union i40e_rx_desc *rx_desc)
 {
        struct i40e_rx_ptype_decoded decoded;
-       bool ipv4, ipv6, tunnel = false;
        u32 rx_error, rx_status;
+       bool ipv4, ipv6;
        u8 ptype;
        u64 qword;
 
@@ -1336,19 +1336,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
-       /* The hardware supported by this driver does not validate outer
-        * checksums for tunneled VXLAN or GENEVE frames.  I don't agree
-        * with it but the specification states that you "MAY validate", it
-        * doesn't make it a hard requirement so if we have validated the
-        * inner checksum report CHECKSUM_UNNECESSARY.
+       /* If there is an outer header present that might contain a checksum
+        * we need to bump the checksum level by 1 to reflect the fact that
+        * we are indicating we validated the inner checksum.
         */
-       if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
-                                 I40E_RX_PTYPE_INNER_PROT_UDP |
-                                 I40E_RX_PTYPE_INNER_PROT_SCTP))
-               tunnel = true;
-
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       skb->csum_level = tunnel ? 1 : 0;
+       if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+               skb->csum_level = 1;
+
+       /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+       switch (decoded.inner_prot) {
+       case I40E_RX_PTYPE_INNER_PROT_TCP:
+       case I40E_RX_PTYPE_INNER_PROT_UDP:
+       case I40E_RX_PTYPE_INNER_PROT_SCTP:
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               /* fall through */
+       default:
+               break;
+       }
 
        return;
 
index be99189da925fc5862e9c318489694341a3a68f9..79d99cd91b24317d0cc970a6bdedbb24abeac356 100644 (file)
@@ -752,8 +752,8 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
                                    union i40e_rx_desc *rx_desc)
 {
        struct i40e_rx_ptype_decoded decoded;
-       bool ipv4, ipv6, tunnel = false;
        u32 rx_error, rx_status;
+       bool ipv4, ipv6;
        u8 ptype;
        u64 qword;
 
@@ -808,19 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
        if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
                return;
 
-       /* The hardware supported by this driver does not validate outer
-        * checksums for tunneled VXLAN or GENEVE frames.  I don't agree
-        * with it but the specification states that you "MAY validate", it
-        * doesn't make it a hard requirement so if we have validated the
-        * inner checksum report CHECKSUM_UNNECESSARY.
+       /* If there is an outer header present that might contain a checksum
+        * we need to bump the checksum level by 1 to reflect the fact that
+        * we are indicating we validated the inner checksum.
         */
-       if (decoded.inner_prot & (I40E_RX_PTYPE_INNER_PROT_TCP |
-                                 I40E_RX_PTYPE_INNER_PROT_UDP |
-                                 I40E_RX_PTYPE_INNER_PROT_SCTP))
-               tunnel = true;
-
-       skb->ip_summed = CHECKSUM_UNNECESSARY;
-       skb->csum_level = tunnel ? 1 : 0;
+       if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+               skb->csum_level = 1;
+
+       /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+       switch (decoded.inner_prot) {
+       case I40E_RX_PTYPE_INNER_PROT_TCP:
+       case I40E_RX_PTYPE_INNER_PROT_UDP:
+       case I40E_RX_PTYPE_INNER_PROT_SCTP:
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+               /* fall through */
+       default:
+               break;
+       }
 
        return;
 
index 088c47cf27d97d0f5a8a40992c95ec7c7a761947..8bebd862a54ccd7f4f3defe4fd1a1035188294d6 100644 (file)
@@ -2887,7 +2887,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
 
-       return 0;
+       return min(work_done, budget - 1);
 }
 
 /**
index 61a80da8b6f0dec4323f094a245fc3fcbb6bcab0..2819abc454c7e71c024ab280ec3a5a1780a07a4b 100644 (file)
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
 static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -IXGBE_ERR_MBX;
+       s32 ret_val = IXGBE_ERR_MBX;
 
        if (!mbx->ops.read)
                goto out;
@@ -111,7 +111,7 @@ out:
 static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = -IXGBE_ERR_MBX;
+       s32 ret_val = IXGBE_ERR_MBX;
 
        /* exit if either we can't write or there isn't a defined timeout */
        if (!mbx->ops.write || !mbx->timeout)
index a6d26d351dfc47c777b39c04a44c2f17bca0feab..f92018b13d2869e57ec307029072e3b85ac5041a 100644 (file)
 /* Various constants */
 
 /* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS                1
+#define MVNETA_TXDONE_COAL_PKTS                0       /* interrupt per packet */
 #define MVNETA_RX_COAL_PKTS            32
 #define MVNETA_RX_COAL_USEC            100
 
@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
        return 0;
 
 err_free_irq:
+       unregister_cpu_notifier(&pp->cpu_notifier);
+       on_each_cpu(mvneta_percpu_disable, pp, true);
        free_percpu_irq(pp->dev->irq, pp->ports);
 err_cleanup_txqs:
        mvneta_cleanup_txqs(pp);
index fc95affaf76b5d7b36d3c745e7fe271c8442cbb8..44cf16d01f4275501870e3711fa27e5d8ec43a0e 100644 (file)
@@ -1042,6 +1042,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_port_profile new_prof;
+       struct mlx4_en_priv *tmp;
        u32 rx_size, tx_size;
        int port_up = 0;
        int err = 0;
@@ -1061,22 +1063,25 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
            tx_size == priv->tx_ring[0]->size)
                return 0;
 
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
        mutex_lock(&mdev->state_lock);
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       new_prof.tx_ring_size = tx_size;
+       new_prof.rx_ring_size = rx_size;
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       if (err)
+               goto out;
+
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }
 
-       mlx4_en_free_resources(priv);
-
-       priv->prof->tx_ring_size = tx_size;
-       priv->prof->rx_ring_size = rx_size;
+       mlx4_en_safe_replace_resources(priv, tmp);
 
-       err = mlx4_en_alloc_resources(priv);
-       if (err) {
-               en_err(priv, "Failed reallocating port resources\n");
-               goto out;
-       }
        if (port_up) {
                err = mlx4_en_start_port(dev);
                if (err)
@@ -1084,8 +1089,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
        }
 
        err = mlx4_en_moderation_update(priv);
-
 out:
+       kfree(tmp);
        mutex_unlock(&mdev->state_lock);
        return err;
 }
@@ -1714,6 +1719,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_port_profile new_prof;
+       struct mlx4_en_priv *tmp;
        int port_up = 0;
        int err = 0;
 
@@ -1723,23 +1730,26 @@ static int mlx4_en_set_channels(struct net_device *dev,
            !channel->tx_count || !channel->rx_count)
                return -EINVAL;
 
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
        mutex_lock(&mdev->state_lock);
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       new_prof.num_tx_rings_p_up = channel->tx_count;
+       new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+       new_prof.rx_ring_num = channel->rx_count;
+
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       if (err)
+               goto out;
+
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }
 
-       mlx4_en_free_resources(priv);
-
-       priv->num_tx_rings_p_up = channel->tx_count;
-       priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
-       priv->rx_ring_num = channel->rx_count;
-
-       err = mlx4_en_alloc_resources(priv);
-       if (err) {
-               en_err(priv, "Failed reallocating port resources\n");
-               goto out;
-       }
+       mlx4_en_safe_replace_resources(priv, tmp);
 
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -1757,8 +1767,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
        }
 
        err = mlx4_en_moderation_update(priv);
-
 out:
+       kfree(tmp);
        mutex_unlock(&mdev->state_lock);
        return err;
 }
index 0c0dfd6cdca64e53152016465d67727d139c6375..8359e9e51b3b4c99d95238575fbe0285b61fa571 100644 (file)
@@ -1954,7 +1954,7 @@ static int mlx4_en_close(struct net_device *dev)
        return 0;
 }
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
        int i;
 
@@ -1979,7 +1979,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 
 }
 
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
        struct mlx4_en_port_profile *prof = priv->prof;
        int i;
@@ -2044,6 +2044,77 @@ static void mlx4_en_shutdown(struct net_device *dev)
        rtnl_unlock();
 }
 
+/* Populate the scratch priv @dst from the live priv @src, applying the
+ * candidate profile @prof, and allocate fresh tx_ring/tx_cq pointer
+ * arrays for it.  Only the fields consumed by mlx4_en_alloc_resources()
+ * are copied.  Returns 0 or -ENOMEM; on failure nothing allocated here
+ * is left behind (tx_ring is freed before returning).
+ */
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+                            struct mlx4_en_priv *src,
+                            struct mlx4_en_port_profile *prof)
+{
+       memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+              sizeof(dst->hwtstamp_config));
+       dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+       dst->tx_ring_num = prof->tx_ring_num;
+       dst->rx_ring_num = prof->rx_ring_num;
+       dst->flags = prof->flags;
+       dst->mdev = src->mdev;
+       dst->port = src->port;
+       dst->dev = src->dev;
+       dst->prof = prof;
+       /* Same stride computation as the probe path: rx descriptor plus
+        * the maximum number of fragment DS entries, rounded up.
+        */
+       dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+                                        DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+       dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+                               GFP_KERNEL);
+       if (!dst->tx_ring)
+               return -ENOMEM;
+
+       dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+                             GFP_KERNEL);
+       if (!dst->tx_cq) {
+               kfree(dst->tx_ring);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+/* Transfer the freshly allocated rings/CQs from the scratch priv @src
+ * into the live priv @dst.  The caller must already have freed @dst's
+ * old resources (see mlx4_en_safe_replace_resources()).  rx arrays are
+ * embedded in the priv, so their contents are memcpy'd; tx arrays are
+ * heap pointers and ownership moves by pointer assignment.
+ */
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+                               struct mlx4_en_priv *src)
+{
+       memcpy(dst->rx_ring, src->rx_ring,
+              sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+       memcpy(dst->rx_cq, src->rx_cq,
+              sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+       memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+              sizeof(dst->hwtstamp_config));
+       dst->tx_ring_num = src->tx_ring_num;
+       dst->rx_ring_num = src->rx_ring_num;
+       dst->tx_ring = src->tx_ring;
+       dst->tx_cq = src->tx_cq;
+       memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
+/* Allocate a complete new set of rings/CQs described by @prof into the
+ * scratch priv @tmp without touching the live configuration in @priv.
+ * On failure the current configuration stays intact and a negative
+ * errno is returned.
+ */
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+                               struct mlx4_en_priv *tmp,
+                               struct mlx4_en_port_profile *prof)
+{
+       int ret;
+
+       /* mlx4_en_copy_priv() can fail allocating tmp's tx pointer
+        * arrays; proceeding with a NULL tmp->tx_ring would crash in
+        * mlx4_en_alloc_resources(), so the return value must be checked.
+        */
+       ret = mlx4_en_copy_priv(tmp, priv, prof);
+       if (ret) {
+               en_warn(priv,
+                       "%s: mlx4_en_copy_priv() failed, using previous configuration\n",
+                       __func__);
+               return ret;
+       }
+
+       if (mlx4_en_alloc_resources(tmp)) {
+               en_warn(priv,
+                       "%s: Resource allocation failed, using previous configuration\n",
+                       __func__);
+               kfree(tmp->tx_ring);
+               kfree(tmp->tx_cq);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+/* Swap the live priv's resources for the pre-allocated set in @tmp:
+ * free the old rings/CQs, then adopt the new ones.  The port must be
+ * stopped and mdev->state_lock held by the caller; @tmp must have been
+ * filled by a successful mlx4_en_try_alloc_resources().
+ */
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+                                   struct mlx4_en_priv *tmp)
+{
+       mlx4_en_free_resources(priv);
+       mlx4_en_update_priv(priv, tmp);
+}
+
 void mlx4_en_destroy_netdev(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -2080,6 +2151,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
        mdev->upper[priv->port] = NULL;
        mutex_unlock(&mdev->state_lock);
 
+#ifdef CONFIG_RFS_ACCEL
+       mlx4_en_cleanup_filters(priv);
+#endif
+
        mlx4_en_free_resources(priv);
 
        kfree(priv->tx_ring);
@@ -3124,6 +3199,8 @@ int mlx4_en_reset_config(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
+       struct mlx4_en_port_profile new_prof;
+       struct mlx4_en_priv *tmp;
        int port_up = 0;
        int err = 0;
 
@@ -3140,19 +3217,29 @@ int mlx4_en_reset_config(struct net_device *dev,
                return -EINVAL;
        }
 
+       tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
        mutex_lock(&mdev->state_lock);
+
+       memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+       memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       if (err)
+               goto out;
+
        if (priv->port_up) {
                port_up = 1;
                mlx4_en_stop_port(dev, 1);
        }
 
-       mlx4_en_free_resources(priv);
-
        en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
-               ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+               ts_config.rx_filter,
+               !!(features & NETIF_F_HW_VLAN_CTAG_RX));
 
-       priv->hwtstamp_config.tx_type = ts_config.tx_type;
-       priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+       mlx4_en_safe_replace_resources(priv, tmp);
 
        if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
                if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -3186,11 +3273,6 @@ int mlx4_en_reset_config(struct net_device *dev,
                dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
        }
 
-       err = mlx4_en_alloc_resources(priv);
-       if (err) {
-               en_err(priv, "Failed reallocating port resources\n");
-               goto out;
-       }
        if (port_up) {
                err = mlx4_en_start_port(dev);
                if (err)
@@ -3199,6 +3281,8 @@ int mlx4_en_reset_config(struct net_device *dev,
 
 out:
        mutex_unlock(&mdev->state_lock);
-       netdev_features_change(dev);
+       kfree(tmp);
+       if (!err)
+               netdev_features_change(dev);
        return err;
 }
index c1b3a9c8cf3b4db9412e722e09983e72c64e3caa..99b5407f2278c0f292961cf8b458e4b4dfa584e7 100644 (file)
@@ -514,9 +514,6 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
        ring->rx_info = NULL;
        kfree(ring);
        *pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
-       mlx4_en_cleanup_filters(priv);
-#endif
 }
 
 void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
index 467d47ed2c394093ca8be3fa0ce1f1a26dc1a038..13d297ee34bb3f7fa195891e3959ce62605e580b 100644 (file)
@@ -353,12 +353,14 @@ struct mlx4_en_port_profile {
        u32 rx_ring_num;
        u32 tx_ring_size;
        u32 rx_ring_size;
+       u8 num_tx_rings_p_up;
        u8 rx_pause;
        u8 rx_ppp;
        u8 tx_pause;
        u8 tx_ppp;
        int rss_rings;
        int inline_thold;
+       struct hwtstamp_config hwtstamp_config;
 };
 
 struct mlx4_en_profile {
@@ -623,8 +625,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
                              u8 rx_ppp, u8 rx_pause,
                              u8 tx_ppp, u8 tx_pause);
 
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+                               struct mlx4_en_priv *tmp,
+                               struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+                                   struct mlx4_en_priv *tmp);
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
                      int entries, int ring, enum cq_type mode, int node);
index 0b4986268cc9923ad672eeb03d9cbdf8edefa7cd..d6e2a1cae19ae2d6d636f1d306bca8e607106095 100644 (file)
@@ -295,6 +295,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
        case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+       case MLX5_CMD_OP_2ERR_QP:
+       case MLX5_CMD_OP_2RST_QP:
+       case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+       case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+       case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+       case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
                return MLX5_CMD_STAT_OK;
 
        case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -321,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_RTR2RTS_QP:
        case MLX5_CMD_OP_RTS2RTS_QP:
        case MLX5_CMD_OP_SQERR2RTS_QP:
-       case MLX5_CMD_OP_2ERR_QP:
-       case MLX5_CMD_OP_2RST_QP:
        case MLX5_CMD_OP_QUERY_QP:
        case MLX5_CMD_OP_SQD_RTS_QP:
        case MLX5_CMD_OP_INIT2INIT_QP:
@@ -342,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
-       case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
        case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
        case MLX5_CMD_OP_SET_ROCE_ADDRESS:
        case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -390,11 +393,12 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
        case MLX5_CMD_OP_CREATE_RQT:
        case MLX5_CMD_OP_MODIFY_RQT:
        case MLX5_CMD_OP_QUERY_RQT:
+
        case MLX5_CMD_OP_CREATE_FLOW_TABLE:
        case MLX5_CMD_OP_QUERY_FLOW_TABLE:
        case MLX5_CMD_OP_CREATE_FLOW_GROUP:
        case MLX5_CMD_OP_QUERY_FLOW_GROUP:
-       case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
        case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
        case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
        case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
@@ -602,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
                pr_debug("\n");
 }
 
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+       struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+       return be16_to_cpu(hdr->opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+       struct delayed_work *dwork = container_of(work, struct delayed_work,
+                                                 work);
+       struct mlx5_cmd_work_ent *ent = container_of(dwork,
+                                                    struct mlx5_cmd_work_ent,
+                                                    cb_timeout_work);
+       struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+                                                cmd);
+
+       ent->ret = -ETIMEDOUT;
+       mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+                      mlx5_command_str(msg_to_opcode(ent->in)),
+                      msg_to_opcode(ent->in));
+       mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
+
 static void cmd_work_handler(struct work_struct *work)
 {
        struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
        struct mlx5_cmd *cmd = ent->cmd;
        struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+       unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
        struct mlx5_cmd_layout *lay;
        struct semaphore *sem;
        unsigned long flags;
@@ -647,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
 
+       if (ent->callback)
+               schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
        /* ring doorbell after the descriptor is valid */
        mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
        wmb();
@@ -691,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
        }
 }
 
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
-       struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
-       return be16_to_cpu(hdr->opcode);
-}
-
 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 {
        unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -706,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 
        if (cmd->mode == CMD_MODE_POLLING) {
                wait_for_completion(&ent->done);
-               err = ent->ret;
-       } else {
-               if (!wait_for_completion_timeout(&ent->done, timeout))
-                       err = -ETIMEDOUT;
-               else
-                       err = 0;
+       } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+               ent->ret = -ETIMEDOUT;
+               mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
        }
+
+       err = ent->ret;
+
        if (err == -ETIMEDOUT) {
                mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                               mlx5_command_str(msg_to_opcode(ent->in)),
@@ -761,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
        if (!callback)
                init_completion(&ent->done);
 
+       INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
        INIT_WORK(&ent->work, cmd_work_handler);
        if (page_queue) {
                cmd_work_handler(&ent->work);
@@ -770,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
                goto out_free;
        }
 
-       if (!callback) {
-               err = wait_func(dev, ent);
-               if (err == -ETIMEDOUT)
-                       goto out;
-
-               ds = ent->ts2 - ent->ts1;
-               op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
-               if (op < ARRAY_SIZE(cmd->stats)) {
-                       stats = &cmd->stats[op];
-                       spin_lock_irq(&stats->lock);
-                       stats->sum += ds;
-                       ++stats->n;
-                       spin_unlock_irq(&stats->lock);
-               }
-               mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
-                                  "fw exec time for %s is %lld nsec\n",
-                                  mlx5_command_str(op), ds);
-               *status = ent->status;
-               free_cmd(ent);
-       }
+       if (callback)
+               goto out;
 
-       return err;
+       err = wait_func(dev, ent);
+       if (err == -ETIMEDOUT)
+               goto out_free;
+
+       ds = ent->ts2 - ent->ts1;
+       op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+       if (op < ARRAY_SIZE(cmd->stats)) {
+               stats = &cmd->stats[op];
+               spin_lock_irq(&stats->lock);
+               stats->sum += ds;
+               ++stats->n;
+               spin_unlock_irq(&stats->lock);
+       }
+       mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+                          "fw exec time for %s is %lld nsec\n",
+                          mlx5_command_str(op), ds);
+       *status = ent->status;
 
 out_free:
        free_cmd(ent);
@@ -1181,41 +1205,30 @@ err_dbg:
        return err;
 }
 
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
        int i;
 
        for (i = 0; i < cmd->max_reg_cmds; i++)
                down(&cmd->sem);
-
        down(&cmd->pages_sem);
 
-       flush_workqueue(cmd->wq);
-
-       cmd->mode = CMD_MODE_EVENTS;
+       cmd->mode = mode;
 
        up(&cmd->pages_sem);
        for (i = 0; i < cmd->max_reg_cmds; i++)
                up(&cmd->sem);
 }
 
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
 {
-       struct mlx5_cmd *cmd = &dev->cmd;
-       int i;
-
-       for (i = 0; i < cmd->max_reg_cmds; i++)
-               down(&cmd->sem);
-
-       down(&cmd->pages_sem);
-
-       flush_workqueue(cmd->wq);
-       cmd->mode = CMD_MODE_POLLING;
+       mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
 
-       up(&cmd->pages_sem);
-       for (i = 0; i < cmd->max_reg_cmds; i++)
-               up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+       mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
 }
 
 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1251,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
                        struct semaphore *sem;
 
                        ent = cmd->ent_arr[i];
+                       if (ent->callback)
+                               cancel_delayed_work(&ent->cb_timeout_work);
                        if (ent->page_queue)
                                sem = &cmd->pages_sem;
                        else
index baa991a23475f31f255a657aeae81262a22396d7..943b1bd434bf50cf6ef26b408035f3af4ea6c38a 100644 (file)
@@ -145,7 +145,6 @@ struct mlx5e_umr_wqe {
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_MIN_BW_ALLOC 1   /* Min percentage of BW allocation */
 #endif
 
 struct mlx5e_params {
@@ -191,6 +190,7 @@ struct mlx5e_tstamp {
 enum {
        MLX5E_RQ_STATE_POST_WQES_ENABLE,
        MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+       MLX5E_RQ_STATE_FLUSH_TIMEOUT,
 };
 
 struct mlx5e_cq {
@@ -220,6 +220,8 @@ typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
 typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
                                  u16 ix);
 
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
 struct mlx5e_dma_info {
        struct page     *page;
        dma_addr_t      addr;
@@ -241,6 +243,7 @@ struct mlx5e_rq {
        struct mlx5e_cq        cq;
        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_alloc_wqe     alloc_wqe;
+       mlx5e_fp_dealloc_wqe   dealloc_wqe;
 
        unsigned long          state;
        int                    ix;
@@ -305,6 +308,7 @@ struct mlx5e_sq_dma {
 enum {
        MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
        MLX5E_SQ_STATE_BF_ENABLE,
+       MLX5E_SQ_STATE_TX_TIMEOUT,
 };
 
 struct mlx5e_ico_wqe_info {
@@ -538,6 +542,7 @@ struct mlx5e_priv {
        struct workqueue_struct    *wq;
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
+       struct work_struct         tx_timeout_work;
        struct delayed_work        update_stats_work;
 
        struct mlx5_core_dev      *mdev;
@@ -589,12 +594,16 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
 int mlx5e_napi_poll(struct napi_struct *napi, int budget);
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
 
 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
 int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
 int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
 void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
                                    struct mlx5_cqe64 *cqe,
index b2db180ae2a5bbdda29219d63c72feea0958c12c..c585349e05c38ed26b49898d23be1ef83fa721a6 100644 (file)
@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
                        tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
                        break;
                case IEEE_8021QAZ_TSA_ETS:
-                       tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+                       tc_tx_bw[i] = ets->tc_tx_bw[i];
                        break;
                }
        }
@@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
 
        /* Validate Bandwidth Sum */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+               if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+                       if (!ets->tc_tx_bw[i])
+                               return -EINVAL;
+
                        bw_sum += ets->tc_tx_bw[i];
+               }
        }
 
        if (bw_sum != 0 && bw_sum != 100)
index cb6defd71fc129c693f4ddca3f6677c97e8b69ac..5a4d88c2cdb292880496831603fb741f5700a880 100644 (file)
 #include "eswitch.h"
 #include "vxlan.h"
 
+enum {
+       MLX5_EN_QP_FLUSH_TIMEOUT_MS     = 5000,
+       MLX5_EN_QP_FLUSH_MSLEEP_QUANT   = 20,
+       MLX5_EN_QP_FLUSH_MAX_ITER       = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
+                                         MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
+};
+
 struct mlx5e_rq_param {
        u32                        rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param       wq;
@@ -74,10 +81,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
        port_state = mlx5_query_vport_state(mdev,
                MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
 
-       if (port_state == VPORT_STATE_UP)
+       if (port_state == VPORT_STATE_UP) {
+               netdev_info(priv->netdev, "Link up\n");
                netif_carrier_on(priv->netdev);
-       else
+       } else {
+               netdev_info(priv->netdev, "Link down\n");
                netif_carrier_off(priv->netdev);
+       }
 }
 
 static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -91,6 +101,26 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
        mutex_unlock(&priv->state_lock);
 }
 
+static void mlx5e_tx_timeout_work(struct work_struct *work)
+{
+       struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+                                              tx_timeout_work);
+       int err;
+
+       rtnl_lock();
+       mutex_lock(&priv->state_lock);
+       if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+               goto unlock;
+       mlx5e_close_locked(priv->netdev);
+       err = mlx5e_open_locked(priv->netdev);
+       if (err)
+               netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+                          err);
+unlock:
+       mutex_unlock(&priv->state_lock);
+       rtnl_unlock();
+}
+
 static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 {
        struct mlx5e_sw_stats *s = &priv->stats.sw;
@@ -305,6 +335,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+               rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
                rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
                rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
@@ -320,6 +351,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
                rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+               rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
 
                rq->wqe_sz = (priv->params.lro_en) ?
                                priv->params.lro_wqe_sz :
@@ -525,17 +557,25 @@ err_destroy_rq:
 
 static void mlx5e_close_rq(struct mlx5e_rq *rq)
 {
+       int tout = 0;
+       int err;
+
        clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
 
-       mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
-       while (!mlx5_wq_ll_is_empty(&rq->wq))
-               msleep(20);
+       err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+       while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
+              tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
+               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+
+       if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
+               set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
 
        /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
        napi_synchronize(&rq->channel->napi);
 
        mlx5e_disable_rq(rq);
+       mlx5e_free_rx_descs(rq);
        mlx5e_destroy_rq(rq);
 }
 
@@ -782,6 +822,9 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 
 static void mlx5e_close_sq(struct mlx5e_sq *sq)
 {
+       int tout = 0;
+       int err;
+
        if (sq->txq) {
                clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
                /* prevent netif_tx_wake_queue */
@@ -792,15 +835,24 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
                if (mlx5e_sq_has_room_for(sq, 1))
                        mlx5e_send_nop(sq, true);
 
-               mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+               err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+                                     MLX5_SQC_STATE_ERR);
+               if (err)
+                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
        }
 
-       while (sq->cc != sq->pc) /* wait till sq is empty */
-               msleep(20);
+       /* wait till sq is empty, unless a TX timeout occurred on this SQ */
+       while (sq->cc != sq->pc &&
+              !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
+               msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+               if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
+                       set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+       }
 
        /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
        napi_synchronize(&sq->channel->napi);
 
+       mlx5e_free_tx_descs(sq);
        mlx5e_disable_sq(sq);
        mlx5e_destroy_sq(sq);
 }
@@ -1296,6 +1348,11 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
                        goto err_close_channels;
        }
 
+       /* FIXME: This is a W/A for tx timeout watch dog false alarm when
+        * polling for inactive tx queues.
+        */
+       netif_tx_start_all_queues(priv->netdev);
+
        kfree(cparam);
        return 0;
 
@@ -1315,6 +1372,12 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
 {
        int i;
 
+       /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
+        * polling for inactive tx queues.
+        */
+       netif_tx_stop_all_queues(priv->netdev);
+       netif_tx_disable(priv->netdev);
+
        for (i = 0; i < priv->params.num_channels; i++)
                mlx5e_close_channel(priv->channel[i]);
 
@@ -1658,8 +1721,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
 
        netdev_set_num_tc(netdev, ntc);
 
+       /* Map netdev TCs to offset 0
+        * We have our own UP to TXQ mapping for QoS
+        */
        for (tc = 0; tc < ntc; tc++)
-               netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+               netdev_set_tc_queue(netdev, tc, nch, 0);
 }
 
 int mlx5e_open_locked(struct net_device *netdev)
@@ -2590,6 +2656,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
        return features;
 }
 
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       bool sched_work = false;
+       int i;
+
+       netdev_err(dev, "TX timeout detected\n");
+
+       for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
+               struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+
+               if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+                       continue;
+               sched_work = true;
+               set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+               netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
+                          i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+       }
+
+       if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+               schedule_work(&priv->tx_timeout_work);
+}
+
 static const struct net_device_ops mlx5e_netdev_ops_basic = {
        .ndo_open                = mlx5e_open,
        .ndo_stop                = mlx5e_close,
@@ -2607,6 +2696,7 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
 #endif
+       .ndo_tx_timeout          = mlx5e_tx_timeout,
 };
 
 static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2636,6 +2726,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
        .ndo_get_vf_config       = mlx5e_get_vf_config,
        .ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
        .ndo_get_vf_stats        = mlx5e_get_vf_stats,
+       .ndo_tx_timeout          = mlx5e_tx_timeout,
 };
 
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2838,6 +2929,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
 
        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+       INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
 }
 
index 022acc2e8922ae177d26411eee85d900c8150f8d..9f2a16a507e04f8cd9861251ab3d0d5973b23e90 100644 (file)
@@ -212,6 +212,20 @@ err_free_skb:
        return -ENOMEM;
 }
 
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+       struct sk_buff *skb = rq->skb[ix];
+
+       if (skb) {
+               rq->skb[ix] = NULL;
+               dma_unmap_single(rq->pdev,
+                                *((dma_addr_t *)skb->cb),
+                                rq->wqe_sz,
+                                DMA_FROM_DEVICE);
+               dev_kfree_skb(skb);
+       }
+}
+
 static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
 {
        return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
@@ -574,6 +588,30 @@ int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
        return 0;
 }
 
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+{
+       struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+
+       wi->free_wqe(rq, wi);
+}
+
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+       struct mlx5_wq_ll *wq = &rq->wq;
+       struct mlx5e_rx_wqe *wqe;
+       __be16 wqe_ix_be;
+       u16 wqe_ix;
+
+       while (!mlx5_wq_ll_is_empty(wq)) {
+               wqe_ix_be = *wq->tail_next;
+               wqe_ix    = be16_to_cpu(wqe_ix_be);
+               wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+               rq->dealloc_wqe(rq, wqe_ix);
+               mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+                              &wqe->next.next_wqe_index);
+       }
+}
+
 #define RQ_CANNOT_POST(rq) \
                (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
                 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
@@ -878,6 +916,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int work_done = 0;
 
+       if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+               return 0;
+
        if (cq->decmprs_left)
                work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
 
index 5a750b9cd006fcc7f3172983875a120aeef75fed..5740b465ef8430f359ed218adf9f93bce9c46e8a 100644 (file)
@@ -110,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
        int channel_ix = fallback(dev, skb);
-       int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
-                skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
+       int up = 0;
+
+       if (!netdev_get_num_tc(dev))
+               return channel_ix;
+
+       if (skb_vlan_tag_present(skb))
+               up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+       /* channel_ix can be larger than num_channels since
+        * dev->num_real_tx_queues = num_channels * num_tc
+        */
+       if (channel_ix >= priv->params.num_channels)
+               channel_ix = reciprocal_scale(channel_ix,
+                                             priv->params.num_channels);
 
        return priv->channeltc_to_txq_map[channel_ix][up];
 }
@@ -123,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
         * headers and occur before the data gather.
         * Therefore these headers must be copied into the WQE
         */
-#define MLX5E_MIN_INLINE ETH_HLEN
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
 
        if (bf) {
                u16 ihs = skb_headlen(skb);
@@ -135,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
                        return skb_headlen(skb);
        }
 
-       return MLX5E_MIN_INLINE;
+       return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
 }
 
 static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -341,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
        return mlx5e_sq_xmit(sq, skb);
 }
 
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+       struct mlx5e_tx_wqe_info *wi;
+       struct sk_buff *skb;
+       u16 ci;
+       int i;
+
+       while (sq->cc != sq->pc) {
+               ci = sq->cc & sq->wq.sz_m1;
+               skb = sq->skb[ci];
+               wi = &sq->wqe_info[ci];
+
+               if (!skb) { /* nop */
+                       sq->cc++;
+                       continue;
+               }
+
+               for (i = 0; i < wi->num_dma; i++) {
+                       struct mlx5e_sq_dma *dma =
+                               mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+                       mlx5e_tx_dma_unmap(sq->pdev, dma);
+               }
+
+               dev_kfree_skb_any(skb);
+               sq->cc += wi->num_wqebbs;
+       }
+}
+
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
        struct mlx5e_sq *sq;
@@ -352,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
        sq = container_of(cq, struct mlx5e_sq, cq);
 
+       if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+               return false;
+
        npkts = 0;
        nbytes = 0;
 
index 42d16b9458e485205a344ab9a988caf7ae185632..96a59463ae65f1773856949b17a9224531cfe930 100644 (file)
@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
 
 void mlx5_enter_error_state(struct mlx5_core_dev *dev)
 {
+       mutex_lock(&dev->intf_state_mutex);
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
-               return;
+               goto unlock;
 
        mlx5_core_err(dev, "start\n");
-       if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+       if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
                dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+               trigger_cmd_completions(dev);
+       }
 
        mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
        mlx5_core_err(dev, "end\n");
+
+unlock:
+       mutex_unlock(&dev->intf_state_mutex);
 }
 
 static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
        u32 count;
 
        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-               trigger_cmd_completions(dev);
                mod_timer(&health->timer, get_next_poll_jiffies());
                return;
        }
index c65f4a13e17ec8f81e1cdd2ea1f35780b966c603..6695893ddd2d407743e8329959a05840c28ad3f1 100644 (file)
@@ -1422,46 +1422,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
        mlx5_pci_err_detected(dev->pdev, 0);
 }
 
-/* wait for the device to show vital signs. For now we check
- * that we can read the device ID and that the health buffer
- * shows a non zero value which is different than 0xffffffff
+/* wait for the device to show vital signs by waiting
+ * for the health counter to start counting.
  */
-static void wait_vital(struct pci_dev *pdev)
+static int wait_vital(struct pci_dev *pdev)
 {
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_core_health *health = &dev->priv.health;
        const int niter = 100;
+       u32 last_count = 0;
        u32 count;
-       u16 did;
        int i;
 
-       /* Wait for firmware to be ready after reset */
-       msleep(1000);
-       for (i = 0; i < niter; i++) {
-               if (pci_read_config_word(pdev, 2, &did)) {
-                       dev_warn(&pdev->dev, "failed reading config word\n");
-                       break;
-               }
-               if (did == pdev->device) {
-                       dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
-                       break;
-               }
-               msleep(50);
-       }
-       if (i == niter)
-               dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
-
        for (i = 0; i < niter; i++) {
                count = ioread32be(health->health_counter);
                if (count && count != 0xffffffff) {
-                       dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
-                       break;
+                       if (last_count && last_count != count) {
+                               dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+                               return 0;
+                       }
+                       last_count = count;
                }
                msleep(50);
        }
 
-       if (i == niter)
-               dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+       return -ETIMEDOUT;
 }
 
 static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1473,7 +1458,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
        dev_info(&pdev->dev, "%s was called\n", __func__);
 
        pci_save_state(pdev);
-       wait_vital(pdev);
+       err = wait_vital(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+               return;
+       }
 
        err = mlx5_load_one(dev, priv);
        if (err)
index 9eeee0545f1cf294ecddb7bb0becc873df8906da..32dea3524cee3ed4d984eb5aafc7a06abdbd713b 100644 (file)
@@ -345,7 +345,6 @@ retry:
                               func_id, npages, err);
                goto out_4k;
        }
-       dev->priv.fw_pages += npages;
 
        err = mlx5_cmd_status_to_err(&out.hdr);
        if (err) {
@@ -373,6 +372,33 @@ out_free:
        return err;
 }
 
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+                            struct mlx5_manage_pages_inbox *in, int in_size,
+                            struct mlx5_manage_pages_outbox *out, int out_size)
+{
+       struct fw_page *fwp;
+       struct rb_node *p;
+       u32 npages;
+       u32 i = 0;
+
+       if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+               return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
+                                                 (u32 *)out, out_size);
+
+       npages = be32_to_cpu(in->num_entries);
+
+       p = rb_first(&dev->priv.page_root);
+       while (p && i < npages) {
+               fwp = rb_entry(p, struct fw_page, rb_node);
+               out->pas[i] = cpu_to_be64(fwp->addr);
+               p = rb_next(p);
+               i++;
+       }
+
+       out->num_entries = cpu_to_be32(i);
+       return 0;
+}
+
 static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                         int *nclaimed)
 {
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
        in.func_id = cpu_to_be16(func_id);
        in.num_entries = cpu_to_be32(npages);
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
-       err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+       err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
        if (err) {
-               mlx5_core_err(dev, "failed reclaiming pages\n");
-               goto out_free;
-       }
-       dev->priv.fw_pages -= npages;
-
-       if (out->hdr.status) {
-               err = mlx5_cmd_status_to_err(&out->hdr);
+               mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                goto out_free;
        }
 
@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                err = -EINVAL;
                goto out_free;
        }
-       if (nclaimed)
-               *nclaimed = num_claimed;
 
        for (i = 0; i < num_claimed; i++) {
                addr = be64_to_cpu(out->pas[i]);
                free_4k(dev, addr);
        }
+
+       if (nclaimed)
+               *nclaimed = num_claimed;
+
        dev->priv.fw_pages -= num_claimed;
        if (func_id)
                dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct fw_page, rb_node);
-                       if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-                               free_4k(dev, fwp->addr);
-                               nclaimed = 1;
-                       } else {
-                               err = reclaim_pages(dev, fwp->func_id,
-                                                   optimal_reclaimed_pages(),
-                                                   &nclaimed);
-                       }
+                       err = reclaim_pages(dev, fwp->func_id,
+                                           optimal_reclaimed_pages(),
+                                           &nclaimed);
+
                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
                                               err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
                }
        } while (p);
 
+       WARN(dev->priv.fw_pages,
+            "FW pages counter is %d after reclaiming all pages\n",
+            dev->priv.fw_pages);
+       WARN(dev->priv.vfs_pages,
+            "VFs FW pages counter is %d after reclaiming all pages\n",
+            dev->priv.vfs_pages);
+
        return 0;
 }
 
index daf44cd4c566f45649b9efe1e4417b4e868c9048..91846dfcbe9cf1b39f0fbf6af8856ad4771dd1ff 100644 (file)
@@ -513,7 +513,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 {
        int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
        void *nic_vport_context;
-       u8 *guid;
        void *in;
        int err;
 
@@ -535,8 +534,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 
        nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
                                         in, nic_vport_context);
-       guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context,
-                           node_guid);
        MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
 
        err = mlx5_modify_nic_vport_context(mdev, in, inlen);
index 05de77267d58a910193715fa85cdb411f9858457..e25a73ed2981848efa6175a6eeb43ad6891df1ec 100644 (file)
@@ -72,8 +72,8 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
        u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
        u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
 
-       memset(&in, 0, sizeof(in));
-       memset(&out, 0, sizeof(out));
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
 
        MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
                 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
index 1977e7a5c5301cc591fade66cb290eb7dbd163e1..57d48da709fb77076c2cd737ad87c02ef0ff3f90 100644 (file)
@@ -2718,7 +2718,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
  * Configures the switch priority to buffer table.
  */
 #define MLXSW_REG_PPTB_ID 0x500B
-#define MLXSW_REG_PPTB_LEN 0x0C
+#define MLXSW_REG_PPTB_LEN 0x10
 
 static const struct mlxsw_reg_info mlxsw_reg_pptb = {
        .id = MLXSW_REG_PPTB_ID,
@@ -2784,6 +2784,13 @@ MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
  */
 MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
 
+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
 #define MLXSW_REG_PPTB_ALL_PRIO 0xFF
 
 static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
@@ -2792,6 +2799,14 @@ static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
        mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
        mlxsw_reg_pptb_local_port_set(payload, local_port);
        mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+       mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+                                                   u8 buff)
+{
+       mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+       mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
 }
 
 /* PBMC - Port Buffer Management Control Register
index 660429ebfbe16b4ca9ff6fc6ae92fc280bd2f1be..374080027b2f2da103a5a2e89b16f16e689239ec 100644 (file)
@@ -171,23 +171,6 @@ static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
 }
 
-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
-                                        bool *p_is_up)
-{
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       char paos_pl[MLXSW_REG_PAOS_LEN];
-       u8 oper_status;
-       int err;
-
-       mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
-       err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
-       if (err)
-               return err;
-       oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
-       *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
-       return 0;
-}
-
 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                      unsigned char *addr)
 {
@@ -1434,7 +1417,8 @@ static int mlxsw_sp_port_get_settings(struct net_device *dev,
 
        cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
                         mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
-                        SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+                        SUPPORTED_Pause | SUPPORTED_Asym_Pause |
+                        SUPPORTED_Autoneg;
        cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
        mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
                                        eth_proto_oper, cmd);
@@ -1493,7 +1477,6 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
        u32 eth_proto_new;
        u32 eth_proto_cap;
        u32 eth_proto_admin;
-       bool is_up;
        int err;
 
        speed = ethtool_cmd_speed(cmd);
@@ -1525,12 +1508,7 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
                return err;
        }
 
-       err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
-       if (err) {
-               netdev_err(dev, "Failed to get oper status");
-               return err;
-       }
-       if (!is_up)
+       if (!netif_running(dev))
                return 0;
 
        err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
index a3720a0fad7d6ff0573256bc95d081e8d25ff2e1..074cdda7b6f337a6985e10a8d3620dd2825d2f3e 100644 (file)
@@ -194,7 +194,7 @@ static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
 
        mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, 0);
+               mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
                               pptb_pl);
 }
index 0b323661c0b6b3122c9aba83fc8d0e3d0f2e68ad..01cfb75128278ca2a1b263636c62314d5bc3d1c2 100644 (file)
@@ -103,7 +103,8 @@ static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
 
        mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
-               mlxsw_reg_pptb_prio_to_buff_set(pptb_pl, i, prio_tc[i]);
+               mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
+
        return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
                               pptb_pl);
 }
@@ -249,6 +250,7 @@ static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
                return err;
 
        memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+       mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
 
        return 0;
 }
@@ -351,7 +353,8 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
        struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
        int err;
 
-       if (mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) {
+       if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
+           pfc->pfc_en) {
                netdev_err(dev, "PAUSE frames already enabled on port\n");
                return -EINVAL;
        }
@@ -371,6 +374,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
        }
 
        memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+       mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
 
        return 0;
 
index 7066954c39d682fe229bf8c151ac46047c4eae77..0a26b11ca8f61eff9f7c5b880b770a99948f07bf 100644 (file)
@@ -1151,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
                        enc28j60_phy_read(priv, PHIR);
                }
                /* TX complete handler */
-               if ((intflags & EIR_TXIF) != 0) {
+               if (((intflags & EIR_TXIF) != 0) &&
+                   ((intflags & EIR_TXERIF) == 0)) {
                        bool err = false;
                        loop++;
                        if (netif_msg_intr(priv))
@@ -1203,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
                                        enc28j60_tx_clear(ndev, true);
                        } else
                                enc28j60_tx_clear(ndev, true);
-                       locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+                       locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
                }
                /* RX Error handler */
                if ((intflags & EIR_RXERIF) != 0) {
@@ -1238,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
  */
 static void enc28j60_hw_tx(struct enc28j60_net *priv)
 {
+       BUG_ON(!priv->tx_skb);
+
        if (netif_msg_tx_queued(priv))
                printk(KERN_DEBUG DRV_NAME
                        ": Tx Packet Len:%d\n", priv->tx_skb->len);
index 607bb7d4514d83b3e4de32f7a8abe634f81a5d95..87c642d3b075b2bc9845ba2cdbb4204e788028c3 100644 (file)
@@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        tx_ring->tx_stats.tx_bytes += skb->len;
        tx_ring->tx_stats.xmit_called++;
 
+       /* Ensure writes are complete before HW fetches Tx descriptors */
+       wmb();
        qlcnic_update_cmd_producer(tx_ring);
 
        return NETDEV_TX_OK;
index a473c182c91d0e4f981640ee285486e4a48e95d8..e4071265be76f2240915879f13130fa30e592a39 100644 (file)
@@ -2804,7 +2804,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
                                priv->tx_path_in_lpi_mode = true;
                        if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
                                priv->tx_path_in_lpi_mode = false;
-                       if (status & CORE_IRQ_MTL_RX_OVERFLOW)
+                       if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
                                priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
                                                        priv->rx_tail_addr,
                                                        STMMAC_CHAN0);
index 922a443e3415bf0656ff7c989412958701d79f96..4ef605a9024752d85596c4b1e120148b70a7f561 100644 (file)
@@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
 static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
 {
        if (!info->egress_timer_scheduled) {
-               mod_timer_pinned(&info->egress_timer, jiffies + 1);
+               mod_timer(&info->egress_timer, jiffies + 1);
                info->egress_timer_scheduled = true;
        }
 }
@@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_ptr)
                BUG();
 
        /* Initialize the egress timer. */
-       init_timer(&info->egress_timer);
+       init_timer_pinned(&info->egress_timer);
        info->egress_timer.data = (long)info;
        info->egress_timer.function = tile_net_handle_egress_timer;
 
index cc39cefeae453e62224b37fb7ea897be0db994a8..9b3dc3c61e00b5839fbd6c98ea6b3383b753df84 100644 (file)
@@ -1072,12 +1072,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
 
 static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
+       struct geneve_dev *geneve = netdev_priv(dev);
        /* The max_mtu calculation does not take account of GENEVE
         * options, to avoid excluding potentially valid
         * configurations.
         */
-       int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
-               - dev->hard_header_len;
+       int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
+
+       if (geneve->remote.sa.sa_family == AF_INET6)
+               max_mtu -= sizeof(struct ipv6hdr);
+       else
+               max_mtu -= sizeof(struct iphdr);
 
        if (new_mtu < 68)
                return -EINVAL;
index 0e7eff7f1cd27b630c07a02f75c08a02f45ec6dd..8bcd78f9496638e30c313abb74d004729d0dae16 100644 (file)
@@ -2640,6 +2640,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
                u64_stats_update_begin(&secy_stats->syncp);
                secy_stats->stats.OutPktsUntagged++;
                u64_stats_update_end(&secy_stats->syncp);
+               skb->dev = macsec->real_dev;
                len = skb->len;
                ret = dev_queue_xmit(skb);
                count_tx(dev, ret, len);
index 2afa61b51d411d45bd0b673e6cc93125c2eb9927..91177a4a32ad21c1cbea044424659f5604cef7de 100644 (file)
@@ -57,6 +57,7 @@
 
 /* PHY CTRL bits */
 #define DP83867_PHYCR_FIFO_DEPTH_SHIFT         14
+#define DP83867_PHYCR_FIFO_DEPTH_MASK          (3 << 14)
 
 /* RGMIIDCTL bits */
 #define DP83867_RGMII_TX_CLK_DELAY_SHIFT       4
@@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
 static int dp83867_config_init(struct phy_device *phydev)
 {
        struct dp83867_private *dp83867;
-       int ret;
-       u16 val, delay;
+       int ret, val;
+       u16 delay;
 
        if (!phydev->priv) {
                dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
        }
 
        if (phy_interface_is_rgmii(phydev)) {
-               ret = phy_write(phydev, MII_DP83867_PHYCTRL,
-                       (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+               val = phy_read(phydev, MII_DP83867_PHYCTRL);
+               if (val < 0)
+                       return val;
+               val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
+               val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+               ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
                if (ret)
                        return ret;
        }
index 8dedafa1a95d0b2f8e1db526cc64770876488f46..a30ee427efab3330492c9cda8ceb65259c190423 100644 (file)
@@ -2601,8 +2601,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
        spin_lock_bh(&pn->all_channels_lock);
        list_del(&pch->list);
        spin_unlock_bh(&pn->all_channels_lock);
-       put_net(pch->chan_net);
-       pch->chan_net = NULL;
 
        pch->file.dead = 1;
        wake_up_interruptible(&pch->file.rwait);
@@ -3136,6 +3134,9 @@ ppp_disconnect_channel(struct channel *pch)
  */
 static void ppp_destroy_channel(struct channel *pch)
 {
+       put_net(pch->chan_net);
+       pch->chan_net = NULL;
+
        atomic_dec(&channel_count);
 
        if (!pch->file.dead) {
index 53759c315b97aeeb54ce5b41fc0e4ec73ee8fac8..877c9516e78174dc41bd1e9d3f4c506d193134a9 100644 (file)
@@ -854,6 +854,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
        if (cdc_ncm_init(dev))
                goto error2;
 
+       /* Some firmwares need a pause here or they will silently fail
+        * to set up the interface properly.  This value was decided
+        * empirically on a Sierra Wireless MC7455 running 02.08.02.00
+        * firmware.
+        */
+       usleep_range(10000, 20000);
+
        /* configure data interface */
        temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
        if (temp) {
index 4e257b8d8f3e37e616e9ea5b5bf0de3d037bae90..e9654a6853818933334488c28a98dbb3073a84cf 100644 (file)
 #include <linux/mdio.h>
 #include <linux/usb/cdc.h>
 #include <linux/suspend.h>
+#include <linux/acpi.h>
 
 /* Information for net-next */
 #define NETNEXT_VERSION                "08"
 
 /* Information for net */
-#define NET_VERSION            "4"
+#define NET_VERSION            "5"
 
 #define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 /* SRAM_IMPEDANCE */
 #define RX_DRIVING_MASK                0x6000
 
+/* MAC PASSTHRU */
+#define AD_MASK                        0xfee0
+#define EFUSE                  0xcfdb
+#define PASS_THRU_MASK         0x1
+
 enum rtl_register_content {
        _1000bps        = 0x10,
        _100bps         = 0x08,
@@ -624,6 +630,7 @@ struct r8152 {
                int (*eee_get)(struct r8152 *, struct ethtool_eee *);
                int (*eee_set)(struct r8152 *, struct ethtool_eee *);
                bool (*in_nway)(struct r8152 *);
+               void (*autosuspend_en)(struct r8152 *tp, bool enable);
        } rtl_ops;
 
        int intr_interval;
@@ -1035,6 +1042,65 @@ out1:
        return ret;
 }
 
+/* Devices containing RTL8153-AD can support a persistent
+ * host system provided MAC address.
+ * Examples of this are Dell TB15 and Dell WD15 docks
+ */
+static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+{
+       acpi_status status;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       int ret = -EINVAL;
+       u32 ocp_data;
+       unsigned char buf[6];
+
+       /* test for -AD variant of RTL8153 */
+       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+       if ((ocp_data & AD_MASK) != 0x1000)
+               return -ENODEV;
+
+       /* test for MAC address pass-through bit */
+       ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+       if ((ocp_data & PASS_THRU_MASK) != 1)
+               return -ENODEV;
+
+       /* returns _AUXMAC_#AABBCCDDEEFF# */
+       status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+       obj = (union acpi_object *)buffer.pointer;
+       if (!ACPI_SUCCESS(status))
+               return -ENODEV;
+       if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+               netif_warn(tp, probe, tp->netdev,
+                          "Invalid buffer when reading pass-thru MAC addr: "
+                          "(%d, %d)\n",
+                          obj->type, obj->string.length);
+               goto amacout;
+       }
+       if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
+           strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
+               netif_warn(tp, probe, tp->netdev,
+                          "Invalid header when reading pass-thru MAC addr\n");
+               goto amacout;
+       }
+       ret = hex2bin(buf, obj->string.pointer + 9, 6);
+       if (!(ret == 0 && is_valid_ether_addr(buf))) {
+               netif_warn(tp, probe, tp->netdev,
+                          "Invalid MAC when reading pass-thru MAC addr: "
+                          "%d, %pM\n", ret, buf);
+               ret = -EINVAL;
+               goto amacout;
+       }
+       memcpy(sa->sa_data, buf, 6);
+       ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
+       netif_info(tp, probe, tp->netdev,
+                  "Using pass-thru MAC addr %pM\n", sa->sa_data);
+
+amacout:
+       kfree(obj);
+       return ret;
+}
+
 static int set_ethernet_addr(struct r8152 *tp)
 {
        struct net_device *dev = tp->netdev;
@@ -1043,8 +1109,15 @@ static int set_ethernet_addr(struct r8152 *tp)
 
        if (tp->version == RTL_VER_01)
                ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
-       else
-               ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+       else {
+               /* if this is not an RTL8153-AD, no eFuse mac pass thru set,
+                * or the system doesn't provide a valid _SB.AMAC, this is
+                * expected to be non-zero
+                */
+               ret = vendor_mac_passthru_addr_read(tp, &sa);
+               if (ret < 0)
+                       ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+       }
 
        if (ret < 0) {
                netif_err(tp, probe, dev, "Get ether addr fail\n");
@@ -2295,10 +2368,6 @@ static u32 __rtl_get_wol(struct r8152 *tp)
        u32 ocp_data;
        u32 wolopts = 0;
 
-       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
-       if (!(ocp_data & LAN_WAKE_EN))
-               return 0;
-
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
        if (ocp_data & LINK_ON_WAKE_EN)
                wolopts |= WAKE_PHY;
@@ -2331,15 +2400,13 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
-       ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+       ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
        if (wolopts & WAKE_UCAST)
                ocp_data |= UWF_EN;
        if (wolopts & WAKE_BCAST)
                ocp_data |= BWF_EN;
        if (wolopts & WAKE_MCAST)
                ocp_data |= MWF_EN;
-       if (wolopts & WAKE_ANY)
-               ocp_data |= LAN_WAKE_EN;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
 
        ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
@@ -2408,9 +2475,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
        if (enable) {
                u32 ocp_data;
 
-               r8153_u1u2en(tp, false);
-               r8153_u2p3en(tp, false);
-
                __rtl_set_wol(tp, WAKE_ANY);
 
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2421,7 +2485,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
 
                ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
        } else {
+               u32 ocp_data;
+
                __rtl_set_wol(tp, tp->saved_wolopts);
+
+               ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+               ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+               ocp_data &= ~LINK_OFF_WAKE_EN;
+               ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+               ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+       }
+}
+
+static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
+{
+       rtl_runtime_suspend_enable(tp, enable);
+
+       if (enable) {
+               r8153_u1u2en(tp, false);
+               r8153_u2p3en(tp, false);
+       } else {
                r8153_u2p3en(tp, true);
                r8153_u1u2en(tp, true);
        }
@@ -3512,7 +3597,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
                napi_disable(&tp->napi);
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                        rtl_stop_rx(tp);
-                       rtl_runtime_suspend_enable(tp, true);
+                       tp->rtl_ops.autosuspend_en(tp, true);
                } else {
                        cancel_delayed_work_sync(&tp->schedule);
                        tp->rtl_ops.down(tp);
@@ -3538,7 +3623,7 @@ static int rtl8152_resume(struct usb_interface *intf)
 
        if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
                if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-                       rtl_runtime_suspend_enable(tp, false);
+                       tp->rtl_ops.autosuspend_en(tp, false);
                        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
                        napi_disable(&tp->napi);
                        set_bit(WORK_ENABLE, &tp->flags);
@@ -3557,7 +3642,7 @@ static int rtl8152_resume(struct usb_interface *intf)
                usb_submit_urb(tp->intr_urb, GFP_KERNEL);
        } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
                if (tp->netdev->flags & IFF_UP)
-                       rtl_runtime_suspend_enable(tp, false);
+                       tp->rtl_ops.autosuspend_en(tp, false);
                clear_bit(SELECTIVE_SUSPEND, &tp->flags);
        }
 
@@ -4137,6 +4222,7 @@ static int rtl_ops_init(struct r8152 *tp)
                ops->eee_get            = r8152_get_eee;
                ops->eee_set            = r8152_set_eee;
                ops->in_nway            = rtl8152_in_nway;
+               ops->autosuspend_en     = rtl_runtime_suspend_enable;
                break;
 
        case RTL_VER_03:
@@ -4152,6 +4238,7 @@ static int rtl_ops_init(struct r8152 *tp)
                ops->eee_get            = r8153_get_eee;
                ops->eee_set            = r8153_set_eee;
                ops->in_nway            = rtl8153_in_nway;
+               ops->autosuspend_en     = rtl8153_runtime_enable;
                break;
 
        default:
@@ -4338,3 +4425,4 @@ module_usb_driver(rtl8152_driver);
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
index 61ba464049374593316e1c0b869774861ee79b5e..3bfb592093263bef36c4fa88de4fed60dbbb1ac1 100644 (file)
@@ -42,7 +42,6 @@
 #include <linux/mii.h>
 #include <linux/usb.h>
 #include <linux/usb/usbnet.h>
-#include <linux/usb/cdc.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/pm_runtime.h>
@@ -395,8 +394,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
        dev->hard_mtu = net->mtu + net->hard_header_len;
        if (dev->rx_urb_size == old_hard_mtu) {
                dev->rx_urb_size = dev->hard_mtu;
-               if (dev->rx_urb_size > old_rx_urb_size)
+               if (dev->rx_urb_size > old_rx_urb_size) {
+                       usbnet_pause_rx(dev);
                        usbnet_unlink_rx_urbs(dev);
+                       usbnet_resume_rx(dev);
+               }
        }
 
        /* max qlen depend on hard_mtu and rx_urb_size */
@@ -1508,8 +1510,9 @@ static void usbnet_bh (unsigned long param)
        } else if (netif_running (dev->net) &&
                   netif_device_present (dev->net) &&
                   netif_carrier_ok(dev->net) &&
-                  !timer_pending (&dev->delay) &&
-                  !test_bit (EVENT_RX_HALT, &dev->flags)) {
+                  !timer_pending(&dev->delay) &&
+                  !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
+                  !test_bit(EVENT_RX_HALT, &dev->flags)) {
                int     temp = dev->rxq.qlen;
 
                if (temp < RX_QLEN(dev)) {
@@ -1968,143 +1971,6 @@ out:
        return err;
 }
 
-int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
-                               struct usb_interface *intf,
-                               u8 *buffer,
-                               int buflen)
-{
-       /* duplicates are ignored */
-       struct usb_cdc_union_desc *union_header = NULL;
-
-       /* duplicates are not tolerated */
-       struct usb_cdc_header_desc *header = NULL;
-       struct usb_cdc_ether_desc *ether = NULL;
-       struct usb_cdc_mdlm_detail_desc *detail = NULL;
-       struct usb_cdc_mdlm_desc *desc = NULL;
-
-       unsigned int elength;
-       int cnt = 0;
-
-       memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
-       hdr->phonet_magic_present = false;
-       while (buflen > 0) {
-               elength = buffer[0];
-               if (!elength) {
-                       dev_err(&intf->dev, "skipping garbage byte\n");
-                       elength = 1;
-                       goto next_desc;
-               }
-               if (buffer[1] != USB_DT_CS_INTERFACE) {
-                       dev_err(&intf->dev, "skipping garbage\n");
-                       goto next_desc;
-               }
-
-               switch (buffer[2]) {
-               case USB_CDC_UNION_TYPE: /* we've found it */
-                       if (elength < sizeof(struct usb_cdc_union_desc))
-                               goto next_desc;
-                       if (union_header) {
-                               dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
-                               goto next_desc;
-                       }
-                       union_header = (struct usb_cdc_union_desc *)buffer;
-                       break;
-               case USB_CDC_COUNTRY_TYPE:
-                       if (elength < sizeof(struct usb_cdc_country_functional_desc))
-                               goto next_desc;
-                       hdr->usb_cdc_country_functional_desc =
-                               (struct usb_cdc_country_functional_desc *)buffer;
-                       break;
-               case USB_CDC_HEADER_TYPE:
-                       if (elength != sizeof(struct usb_cdc_header_desc))
-                               goto next_desc;
-                       if (header)
-                               return -EINVAL;
-                       header = (struct usb_cdc_header_desc *)buffer;
-                       break;
-               case USB_CDC_ACM_TYPE:
-                       if (elength < sizeof(struct usb_cdc_acm_descriptor))
-                               goto next_desc;
-                       hdr->usb_cdc_acm_descriptor =
-                               (struct usb_cdc_acm_descriptor *)buffer;
-                       break;
-               case USB_CDC_ETHERNET_TYPE:
-                       if (elength != sizeof(struct usb_cdc_ether_desc))
-                               goto next_desc;
-                       if (ether)
-                               return -EINVAL;
-                       ether = (struct usb_cdc_ether_desc *)buffer;
-                       break;
-               case USB_CDC_CALL_MANAGEMENT_TYPE:
-                       if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
-                               goto next_desc;
-                       hdr->usb_cdc_call_mgmt_descriptor =
-                               (struct usb_cdc_call_mgmt_descriptor *)buffer;
-                       break;
-               case USB_CDC_DMM_TYPE:
-                       if (elength < sizeof(struct usb_cdc_dmm_desc))
-                               goto next_desc;
-                       hdr->usb_cdc_dmm_desc =
-                               (struct usb_cdc_dmm_desc *)buffer;
-                       break;
-               case USB_CDC_MDLM_TYPE:
-                       if (elength < sizeof(struct usb_cdc_mdlm_desc *))
-                               goto next_desc;
-                       if (desc)
-                               return -EINVAL;
-                       desc = (struct usb_cdc_mdlm_desc *)buffer;
-                       break;
-               case USB_CDC_MDLM_DETAIL_TYPE:
-                       if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
-                               goto next_desc;
-                       if (detail)
-                               return -EINVAL;
-                       detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
-                       break;
-               case USB_CDC_NCM_TYPE:
-                       if (elength < sizeof(struct usb_cdc_ncm_desc))
-                               goto next_desc;
-                       hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
-                       break;
-               case USB_CDC_MBIM_TYPE:
-                       if (elength < sizeof(struct usb_cdc_mbim_desc))
-                               goto next_desc;
-
-                       hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
-                       break;
-               case USB_CDC_MBIM_EXTENDED_TYPE:
-                       if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
-                               break;
-                       hdr->usb_cdc_mbim_extended_desc =
-                               (struct usb_cdc_mbim_extended_desc *)buffer;
-                       break;
-               case CDC_PHONET_MAGIC_NUMBER:
-                       hdr->phonet_magic_present = true;
-                       break;
-               default:
-                       /*
-                        * there are LOTS more CDC descriptors that
-                        * could legitimately be found here.
-                        */
-                       dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %ud\n",
-                                       buffer[2], elength);
-                       goto next_desc;
-               }
-               cnt++;
-next_desc:
-               buflen -= elength;
-               buffer += elength;
-       }
-       hdr->usb_cdc_union_desc = union_header;
-       hdr->usb_cdc_header_desc = header;
-       hdr->usb_cdc_mdlm_detail_desc = detail;
-       hdr->usb_cdc_mdlm_desc = desc;
-       hdr->usb_cdc_ether_desc = ether;
-       return cnt;
-}
-
-EXPORT_SYMBOL(cdc_parse_cdc_header);
-
 /*
  * The function can't be called inside suspend/resume callback,
  * otherwise deadlock will be caused.
index 1a51584a382bf28dcf567a3fb039bc1a571cc144..d5fb55c0a9d95cdd2e6ac9cc99ca17f44b2d6ef7 100644 (file)
@@ -1394,19 +1394,22 @@ static int ns_cmp(void *priv, struct list_head *a, struct list_head *b)
        return nsa->ns_id - nsb->ns_id;
 }
 
-static struct nvme_ns *nvme_find_ns(struct nvme_ctrl *ctrl, unsigned nsid)
+static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
-       struct nvme_ns *ns;
-
-       lockdep_assert_held(&ctrl->namespaces_mutex);
+       struct nvme_ns *ns, *ret = NULL;
 
+       mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry(ns, &ctrl->namespaces, list) {
-               if (ns->ns_id == nsid)
-                       return ns;
+               if (ns->ns_id == nsid) {
+                       kref_get(&ns->kref);
+                       ret = ns;
+                       break;
+               }
                if (ns->ns_id > nsid)
                        break;
        }
-       return NULL;
+       mutex_unlock(&ctrl->namespaces_mutex);
+       return ret;
 }
 
 static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
@@ -1415,8 +1418,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        struct gendisk *disk;
        int node = dev_to_node(ctrl->dev);
 
-       lockdep_assert_held(&ctrl->namespaces_mutex);
-
        ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
        if (!ns)
                return;
@@ -1457,7 +1458,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
        if (nvme_revalidate_disk(ns->disk))
                goto out_free_disk;
 
-       list_add_tail_rcu(&ns->list, &ctrl->namespaces);
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_add_tail(&ns->list, &ctrl->namespaces);
+       mutex_unlock(&ctrl->namespaces_mutex);
+
        kref_get(&ctrl->kref);
        if (ns->type == NVME_NS_LIGHTNVM)
                return;
@@ -1480,8 +1484,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 static void nvme_ns_remove(struct nvme_ns *ns)
 {
-       lockdep_assert_held(&ns->ctrl->namespaces_mutex);
-
        if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
                return;
 
@@ -1494,8 +1496,11 @@ static void nvme_ns_remove(struct nvme_ns *ns)
                blk_mq_abort_requeue_list(ns->queue);
                blk_cleanup_queue(ns->queue);
        }
+
+       mutex_lock(&ns->ctrl->namespaces_mutex);
        list_del_init(&ns->list);
-       synchronize_rcu();
+       mutex_unlock(&ns->ctrl->namespaces_mutex);
+
        nvme_put_ns(ns);
 }
 
@@ -1503,10 +1508,11 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
        struct nvme_ns *ns;
 
-       ns = nvme_find_ns(ctrl, nsid);
+       ns = nvme_find_get_ns(ctrl, nsid);
        if (ns) {
                if (revalidate_disk(ns->disk))
                        nvme_ns_remove(ns);
+               nvme_put_ns(ns);
        } else
                nvme_alloc_ns(ctrl, nsid);
 }
@@ -1535,9 +1541,11 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
                        nvme_validate_ns(ctrl, nsid);
 
                        while (++prev < nsid) {
-                               ns = nvme_find_ns(ctrl, prev);
-                               if (ns)
+                               ns = nvme_find_get_ns(ctrl, prev);
+                               if (ns) {
                                        nvme_ns_remove(ns);
+                                       nvme_put_ns(ns);
+                               }
                        }
                }
                nn -= j;
@@ -1552,8 +1560,6 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
        struct nvme_ns *ns, *next;
        unsigned i;
 
-       lockdep_assert_held(&ctrl->namespaces_mutex);
-
        for (i = 1; i <= nn; i++)
                nvme_validate_ns(ctrl, i);
 
@@ -1576,7 +1582,6 @@ static void nvme_scan_work(struct work_struct *work)
        if (nvme_identify_ctrl(ctrl, &id))
                return;
 
-       mutex_lock(&ctrl->namespaces_mutex);
        nn = le32_to_cpu(id->nn);
        if (ctrl->vs >= NVME_VS(1, 1) &&
            !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -1585,6 +1590,7 @@ static void nvme_scan_work(struct work_struct *work)
        }
        nvme_scan_ns_sequential(ctrl, nn);
  done:
+       mutex_lock(&ctrl->namespaces_mutex);
        list_sort(NULL, &ctrl->namespaces, ns_cmp);
        mutex_unlock(&ctrl->namespaces_mutex);
        kfree(id);
@@ -1604,6 +1610,11 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl)
 }
 EXPORT_SYMBOL_GPL(nvme_queue_scan);
 
+/*
+ * This function iterates the namespace list unlocked to allow recovery from
+ * controller failure. It is up to the caller to ensure the namespace list is
+ * not modified by scan work while this function is executing.
+ */
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns, *next;
@@ -1617,10 +1628,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
        if (ctrl->state == NVME_CTRL_DEAD)
                nvme_kill_queues(ctrl);
 
-       mutex_lock(&ctrl->namespaces_mutex);
        list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
                nvme_ns_remove(ns);
-       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
 
@@ -1791,11 +1800,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
-               if (!kref_get_unless_zero(&ns->kref))
-                       continue;
-
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
                /*
                 * Revalidating a dead namespace sets capacity to 0. This will
                 * end buffered writers dirtying pages that can't be synced.
@@ -1806,10 +1812,8 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
                blk_set_queue_dying(ns->queue);
                blk_mq_abort_requeue_list(ns->queue);
                blk_mq_start_stopped_hw_queues(ns->queue, true);
-
-               nvme_put_ns(ns);
        }
-       rcu_read_unlock();
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
 
@@ -1817,8 +1821,8 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
                spin_lock_irq(ns->queue->queue_lock);
                queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
                spin_unlock_irq(ns->queue->queue_lock);
@@ -1826,7 +1830,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
                blk_mq_cancel_requeue_work(ns->queue);
                blk_mq_stop_hw_queues(ns->queue);
        }
-       rcu_read_unlock();
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_queues);
 
@@ -1834,13 +1838,13 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 {
        struct nvme_ns *ns;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+       mutex_lock(&ctrl->namespaces_mutex);
+       list_for_each_entry(ns, &ctrl->namespaces, list) {
                queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
                blk_mq_start_stopped_hw_queues(ns->queue, true);
                blk_mq_kick_requeue_list(ns->queue);
        }
-       rcu_read_unlock();
+       mutex_unlock(&ctrl->namespaces_mutex);
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
 
index 3041d48e71558f80131ee10c1295c7bca20ae083..f550c4596a7a92f2c8129ebef54272b7cd07ca32 100644 (file)
@@ -15,7 +15,8 @@ if NVMEM
 
 config NVMEM_IMX_OCOTP
        tristate "i.MX6 On-Chip OTP Controller support"
-       depends on SOC_IMX6
+       depends on SOC_IMX6 || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          This is a driver for the On-Chip OTP Controller (OCOTP) available on
          i.MX6 SoCs, providing access to 4 Kbits of one-time programmable
@@ -50,7 +51,6 @@ config MTK_EFUSE
        tristate "Mediatek SoCs EFUSE support"
        depends on ARCH_MEDIATEK || COMPILE_TEST
        depends on HAS_IOMEM
-       select REGMAP_MMIO
        help
          This is a driver to access hardware related data like sensor
          calibration, HDMI impedance etc.
index 75e66ef5b0ecb8045595139bb4ffd3d2970930a8..ac27b9bac3b9a9abe219e7d60fab8ea946c0abd8 100644 (file)
@@ -15,6 +15,7 @@
  * http://www.gnu.org/copyleft/gpl.html
  */
 
+#include <linux/clk.h>
 #include <linux/device.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -26,6 +27,7 @@
 
 struct ocotp_priv {
        struct device *dev;
+       struct clk *clk;
        void __iomem *base;
        unsigned int nregs;
 };
@@ -36,7 +38,7 @@ static int imx_ocotp_read(void *context, unsigned int offset,
        struct ocotp_priv *priv = context;
        unsigned int count;
        u32 *buf = val;
-       int i;
+       int i, ret;
        u32 index;
 
        index = offset >> 2;
@@ -45,9 +47,16 @@ static int imx_ocotp_read(void *context, unsigned int offset,
        if (count > (priv->nregs - index))
                count = priv->nregs - index;
 
+       ret = clk_prepare_enable(priv->clk);
+       if (ret < 0) {
+               dev_err(priv->dev, "failed to prepare/enable ocotp clk\n");
+               return ret;
+       }
        for (i = index; i < (index + count); i++)
                *buf++ = readl(priv->base + 0x400 + i * 0x10);
 
+       clk_disable_unprepare(priv->clk);
+
        return 0;
 }
 
@@ -85,8 +94,12 @@ static int imx_ocotp_probe(struct platform_device *pdev)
        if (IS_ERR(priv->base))
                return PTR_ERR(priv->base);
 
+       priv->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(priv->clk))
+               return PTR_ERR(priv->clk);
+
        of_id = of_match_device(imx_ocotp_dt_ids, dev);
-       priv->nregs = (unsigned int)of_id->data;
+       priv->nregs = (unsigned long)of_id->data;
        imx_ocotp_nvmem_config.size = 4 * priv->nregs;
        imx_ocotp_nvmem_config.dev = dev;
        imx_ocotp_nvmem_config.priv = priv;
index 9c49369beea56b0ee4ded8cce15f306ef96aa37a..32fd572e18c54771c2023dbf562d18eb22eb3322 100644 (file)
 
 #include <linux/device.h>
 #include <linux/module.h>
+#include <linux/io.h>
 #include <linux/nvmem-provider.h>
 #include <linux/platform_device.h>
-#include <linux/regmap.h>
 
-static struct regmap_config mtk_regmap_config = {
-       .reg_bits = 32,
-       .val_bits = 32,
-       .reg_stride = 4,
-};
+static int mtk_reg_read(void *context,
+                       unsigned int reg, void *_val, size_t bytes)
+{
+       void __iomem *base = context;
+       u32 *val = _val;
+       int i = 0, words = bytes / 4;
+
+       while (words--)
+               *val++ = readl(base + reg + (i++ * 4));
+
+       return 0;
+}
+
+static int mtk_reg_write(void *context,
+                        unsigned int reg, void *_val, size_t bytes)
+{
+       void __iomem *base = context;
+       u32 *val = _val;
+       int i = 0, words = bytes / 4;
+
+       while (words--)
+               writel(*val++, base + reg + (i++ * 4));
+
+       return 0;
+}
 
 static int mtk_efuse_probe(struct platform_device *pdev)
 {
@@ -30,7 +50,6 @@ static int mtk_efuse_probe(struct platform_device *pdev)
        struct resource *res;
        struct nvmem_device *nvmem;
        struct nvmem_config *econfig;
-       struct regmap *regmap;
        void __iomem *base;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -42,14 +61,12 @@ static int mtk_efuse_probe(struct platform_device *pdev)
        if (!econfig)
                return -ENOMEM;
 
-       mtk_regmap_config.max_register = resource_size(res) - 1;
-
-       regmap = devm_regmap_init_mmio(dev, base, &mtk_regmap_config);
-       if (IS_ERR(regmap)) {
-               dev_err(dev, "regmap init failed\n");
-               return PTR_ERR(regmap);
-       }
-
+       econfig->stride = 4;
+       econfig->word_size = 4;
+       econfig->reg_read = mtk_reg_read;
+       econfig->reg_write = mtk_reg_write;
+       econfig->size = resource_size(res);
+       econfig->priv = base;
        econfig->dev = dev;
        econfig->owner = THIS_MODULE;
        nvmem = nvmem_register(econfig);
index 2bb3c5799ac4b0146b2760d8823fb1eb28a6bdaa..d26dd03cec80fbd3310a8ad33e626f7d28d117b9 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/nvmem-provider.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
-#include <linux/regmap.h>
 #include <linux/slab.h>
 #include <linux/stmp_device.h>
 
@@ -66,11 +65,10 @@ static int mxs_ocotp_wait(struct mxs_ocotp *otp)
        return 0;
 }
 
-static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
-                         void *val, size_t val_size)
+static int mxs_ocotp_read(void *context, unsigned int offset,
+                         void *val, size_t bytes)
 {
        struct mxs_ocotp *otp = context;
-       unsigned int offset = *(u32 *)reg;
        u32 *buf = val;
        int ret;
 
@@ -94,17 +92,16 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
        if (ret)
                goto close_banks;
 
-       while (val_size >= reg_size) {
+       while (bytes) {
                if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
                        /* fill up non-data register */
-                       *buf = 0;
+                       *buf++ = 0;
                } else {
-                       *buf = readl(otp->base + offset);
+                       *buf++ = readl(otp->base + offset);
                }
 
-               buf++;
-               val_size -= reg_size;
-               offset += reg_size;
+               bytes -= 4;
+               offset += 4;
        }
 
 close_banks:
@@ -117,57 +114,29 @@ disable_clk:
        return ret;
 }
 
-static int mxs_ocotp_write(void *context, const void *data, size_t count)
-{
-       /* We don't want to support writing */
-       return 0;
-}
-
-static bool mxs_ocotp_writeable_reg(struct device *dev, unsigned int reg)
-{
-       return false;
-}
-
 static struct nvmem_config ocotp_config = {
        .name = "mxs-ocotp",
+       .stride = 16,
+       .word_size = 4,
        .owner = THIS_MODULE,
+       .reg_read = mxs_ocotp_read,
 };
 
-static const struct regmap_range imx23_ranges[] = {
-       regmap_reg_range(OCOTP_DATA_OFFSET, 0x210),
-};
-
-static const struct regmap_access_table imx23_access = {
-       .yes_ranges = imx23_ranges,
-       .n_yes_ranges = ARRAY_SIZE(imx23_ranges),
-};
-
-static const struct regmap_range imx28_ranges[] = {
-       regmap_reg_range(OCOTP_DATA_OFFSET, 0x290),
-};
-
-static const struct regmap_access_table imx28_access = {
-       .yes_ranges = imx28_ranges,
-       .n_yes_ranges = ARRAY_SIZE(imx28_ranges),
+struct mxs_data {
+       int size;
 };
 
-static struct regmap_bus mxs_ocotp_bus = {
-       .read = mxs_ocotp_read,
-       .write = mxs_ocotp_write, /* make regmap_init() happy */
-       .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
-       .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+static const struct mxs_data imx23_data = {
+       .size = 0x220,
 };
 
-static struct regmap_config mxs_ocotp_config = {
-       .reg_bits = 32,
-       .val_bits = 32,
-       .reg_stride = 16,
-       .writeable_reg = mxs_ocotp_writeable_reg,
+static const struct mxs_data imx28_data = {
+       .size = 0x2a0,
 };
 
 static const struct of_device_id mxs_ocotp_match[] = {
-       { .compatible = "fsl,imx23-ocotp", .data = &imx23_access },
-       { .compatible = "fsl,imx28-ocotp", .data = &imx28_access },
+       { .compatible = "fsl,imx23-ocotp", .data = &imx23_data },
+       { .compatible = "fsl,imx28-ocotp", .data = &imx28_data },
        { /* sentinel */},
 };
 MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
@@ -175,11 +144,10 @@ MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
 static int mxs_ocotp_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
+       const struct mxs_data *data;
        struct mxs_ocotp *otp;
        struct resource *res;
        const struct of_device_id *match;
-       struct regmap *regmap;
-       const struct regmap_access_table *access;
        int ret;
 
        match = of_match_device(dev->driver->of_match_table, dev);
@@ -205,17 +173,10 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
                return ret;
        }
 
-       access = match->data;
-       mxs_ocotp_config.rd_table = access;
-       mxs_ocotp_config.max_register = access->yes_ranges[0].range_max;
-
-       regmap = devm_regmap_init(dev, &mxs_ocotp_bus, otp, &mxs_ocotp_config);
-       if (IS_ERR(regmap)) {
-               dev_err(dev, "regmap init failed\n");
-               ret = PTR_ERR(regmap);
-               goto err_clk;
-       }
+       data = match->data;
 
+       ocotp_config.size = data->size;
+       ocotp_config.priv = otp;
        ocotp_config.dev = dev;
        otp->nvmem = nvmem_register(&ocotp_config);
        if (IS_ERR(otp->nvmem)) {
index 1fa6925733d31bacc6118fc20f36e65c12ad3f8c..8db5079f09a7826bdf66dc94f02c19e902f99e6e 100644 (file)
@@ -51,6 +51,9 @@ obj-$(CONFIG_ACPI)    += pci-acpi.o
 # SMBIOS provided firmware instance and labels
 obj-$(CONFIG_PCI_LABEL) += pci-label.o
 
+# Intel MID platform PM support
+obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
+
 obj-$(CONFIG_PCI_SYSCALL) += syscall.o
 
 obj-$(CONFIG_PCI_STUB) += pci-stub.o
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
new file mode 100644 (file)
index 0000000..c878aa7
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Intel MID platform PM support
+ *
+ * Copyright (C) 2016, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/intel-mid.h>
+
+#include "pci.h"
+
+static bool mid_pci_power_manageable(struct pci_dev *dev)
+{
+       return true;
+}
+
+static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+       return intel_mid_pci_set_power_state(pdev, state);
+}
+
+static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
+{
+       return PCI_D3hot;
+}
+
+static int mid_pci_sleep_wake(struct pci_dev *dev, bool enable)
+{
+       return 0;
+}
+
+static int mid_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+       return 0;
+}
+
+static bool mid_pci_need_resume(struct pci_dev *dev)
+{
+       return false;
+}
+
+static struct pci_platform_pm_ops mid_pci_platform_pm = {
+       .is_manageable  = mid_pci_power_manageable,
+       .set_state      = mid_pci_set_power_state,
+       .choose_state   = mid_pci_choose_state,
+       .sleep_wake     = mid_pci_sleep_wake,
+       .run_wake       = mid_pci_run_wake,
+       .need_resume    = mid_pci_need_resume,
+};
+
+#define ICPU(model)    { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
+
+static const struct x86_cpu_id lpss_cpu_ids[] = {
+       ICPU(INTEL_FAM6_ATOM_MERRIFIELD1),
+       {}
+};
+
+static int __init mid_pci_init(void)
+{
+       const struct x86_cpu_id *id;
+
+       id = x86_match_cpu(lpss_cpu_ids);
+       if (id)
+               pci_set_platform_pm(&mid_pci_platform_pm);
+       return 0;
+}
+arch_initcall(mid_pci_init);
index b869b98835f4f5c4e4f3df5dfc998427af13a452..cc0b695453853f1ab30a005868c4e4f37e45b01f 100644 (file)
@@ -44,6 +44,16 @@ config ARMADA375_USBCLUSTER_PHY
        depends on OF && HAS_IOMEM
        select GENERIC_PHY
 
+config PHY_DA8XX_USB
+       tristate "TI DA8xx USB PHY Driver"
+       depends on ARCH_DAVINCI_DA8XX
+       select GENERIC_PHY
+       select MFD_SYSCON
+       help
+         Enable this to support the USB PHY on DA8xx SoCs.
+
+         This driver controls both the USB 1.1 PHY and the USB 2.0 PHY.
+
 config PHY_DM816X_USB
        tristate "TI dm816x USB PHY driver"
        depends on ARCH_OMAP2PLUS
@@ -176,6 +186,7 @@ config TWL4030_USB
        tristate "TWL4030 USB Transceiver Driver"
        depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
        depends on USB_SUPPORT
+       depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't 'y'
        select GENERIC_PHY
        select USB_PHY
        help
index 9c3e73ccabc4193c92f02e3b077959b987d76a70..fa8480e89724d50ab94ae747f574fe78c924c4b1 100644 (file)
@@ -6,6 +6,7 @@ obj-$(CONFIG_GENERIC_PHY)               += phy-core.o
 obj-$(CONFIG_PHY_BCM_NS_USB2)          += phy-bcm-ns-usb2.o
 obj-$(CONFIG_PHY_BERLIN_USB)           += phy-berlin-usb.o
 obj-$(CONFIG_PHY_BERLIN_SATA)          += phy-berlin-sata.o
+obj-$(CONFIG_PHY_DA8XX_USB)            += phy-da8xx-usb.o
 obj-$(CONFIG_PHY_DM816X_USB)           += phy-dm816x-usb.o
 obj-$(CONFIG_ARMADA375_USBCLUSTER_PHY) += phy-armada375-usb2.o
 obj-$(CONFIG_BCM_KONA_USB2_PHY)                += phy-bcm-kona-usb2.o
index b72e9a3b64297dd8a78e75cb2d81bd2d9ce94bfe..8eca906b6e70b463b63a49998f55f8813d54dc92 100644 (file)
@@ -342,6 +342,21 @@ int phy_power_off(struct phy *phy)
 }
 EXPORT_SYMBOL_GPL(phy_power_off);
 
+int phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+       int ret;
+
+       if (!phy || !phy->ops->set_mode)
+               return 0;
+
+       mutex_lock(&phy->mutex);
+       ret = phy->ops->set_mode(phy, mode);
+       mutex_unlock(&phy->mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(phy_set_mode);
+
 /**
  * _of_phy_get() - lookup and obtain a reference to a phy by phandle
  * @np: device_node for which to get the phy
diff --git a/drivers/phy/phy-da8xx-usb.c b/drivers/phy/phy-da8xx-usb.c
new file mode 100644 (file)
index 0000000..b2e59b6
--- /dev/null
@@ -0,0 +1,245 @@
+/*
+ * phy-da8xx-usb - TI DaVinci DA8xx USB PHY driver
+ *
+ * Copyright (C) 2016 David Lechner <david@lechnology.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/mfd/da8xx-cfgchip.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+struct da8xx_usb_phy {
+       struct phy_provider     *phy_provider;
+       struct phy              *usb11_phy;
+       struct phy              *usb20_phy;
+       struct clk              *usb11_clk;
+       struct clk              *usb20_clk;
+       struct regmap           *regmap;
+};
+
+static int da8xx_usb11_phy_power_on(struct phy *phy)
+{
+       struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+       int ret;
+
+       ret = clk_prepare_enable(d_phy->usb11_clk);
+       if (ret)
+               return ret;
+
+       regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_USB1SUSPENDM,
+                         CFGCHIP2_USB1SUSPENDM);
+
+       return 0;
+}
+
+static int da8xx_usb11_phy_power_off(struct phy *phy)
+{
+       struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+
+       regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_USB1SUSPENDM, 0);
+
+       clk_disable_unprepare(d_phy->usb11_clk);
+
+       return 0;
+}
+
+static const struct phy_ops da8xx_usb11_phy_ops = {
+       .power_on       = da8xx_usb11_phy_power_on,
+       .power_off      = da8xx_usb11_phy_power_off,
+       .owner          = THIS_MODULE,
+};
+
+static int da8xx_usb20_phy_power_on(struct phy *phy)
+{
+       struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+       int ret;
+
+       ret = clk_prepare_enable(d_phy->usb20_clk);
+       if (ret)
+               return ret;
+
+       regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGPWRDN, 0);
+
+       return 0;
+}
+
+static int da8xx_usb20_phy_power_off(struct phy *phy)
+{
+       struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+
+       regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGPWRDN,
+                         CFGCHIP2_OTGPWRDN);
+
+       clk_disable_unprepare(d_phy->usb20_clk);
+
+       return 0;
+}
+
+static int da8xx_usb20_phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+       struct da8xx_usb_phy *d_phy = phy_get_drvdata(phy);
+       u32 val;
+
+       switch (mode) {
+       case PHY_MODE_USB_HOST:         /* Force VBUS valid, ID = 0 */
+               val = CFGCHIP2_OTGMODE_FORCE_HOST;
+               break;
+       case PHY_MODE_USB_DEVICE:       /* Force VBUS valid, ID = 1 */
+               val = CFGCHIP2_OTGMODE_FORCE_DEVICE;
+               break;
+       case PHY_MODE_USB_OTG:  /* Don't override the VBUS/ID comparators */
+               val = CFGCHIP2_OTGMODE_NO_OVERRIDE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       regmap_write_bits(d_phy->regmap, CFGCHIP(2), CFGCHIP2_OTGMODE_MASK,
+                         val);
+
+       return 0;
+}
+
+static const struct phy_ops da8xx_usb20_phy_ops = {
+       .power_on       = da8xx_usb20_phy_power_on,
+       .power_off      = da8xx_usb20_phy_power_off,
+       .set_mode       = da8xx_usb20_phy_set_mode,
+       .owner          = THIS_MODULE,
+};
+
+static struct phy *da8xx_usb_phy_of_xlate(struct device *dev,
+                                        struct of_phandle_args *args)
+{
+       struct da8xx_usb_phy *d_phy = dev_get_drvdata(dev);
+
+       if (!d_phy)
+               return ERR_PTR(-ENODEV);
+
+       switch (args->args[0]) {
+       case 0:
+               return d_phy->usb20_phy;
+       case 1:
+               return d_phy->usb11_phy;
+       default:
+               return ERR_PTR(-EINVAL);
+       }
+}
+
+static int da8xx_usb_phy_probe(struct platform_device *pdev)
+{
+       struct device           *dev = &pdev->dev;
+       struct device_node      *node = dev->of_node;
+       struct da8xx_usb_phy    *d_phy;
+
+       d_phy = devm_kzalloc(dev, sizeof(*d_phy), GFP_KERNEL);
+       if (!d_phy)
+               return -ENOMEM;
+
+       if (node)
+               d_phy->regmap = syscon_regmap_lookup_by_compatible(
+                                                       "ti,da830-cfgchip");
+       else
+               d_phy->regmap = syscon_regmap_lookup_by_pdevname("syscon.0");
+       if (IS_ERR(d_phy->regmap)) {
+               dev_err(dev, "Failed to get syscon\n");
+               return PTR_ERR(d_phy->regmap);
+       }
+
+       d_phy->usb11_clk = devm_clk_get(dev, "usb11_phy");
+       if (IS_ERR(d_phy->usb11_clk)) {
+               dev_err(dev, "Failed to get usb11_phy clock\n");
+               return PTR_ERR(d_phy->usb11_clk);
+       }
+
+       d_phy->usb20_clk = devm_clk_get(dev, "usb20_phy");
+       if (IS_ERR(d_phy->usb20_clk)) {
+               dev_err(dev, "Failed to get usb20_phy clock\n");
+               return PTR_ERR(d_phy->usb20_clk);
+       }
+
+       d_phy->usb11_phy = devm_phy_create(dev, node, &da8xx_usb11_phy_ops);
+       if (IS_ERR(d_phy->usb11_phy)) {
+               dev_err(dev, "Failed to create usb11 phy\n");
+               return PTR_ERR(d_phy->usb11_phy);
+       }
+
+       d_phy->usb20_phy = devm_phy_create(dev, node, &da8xx_usb20_phy_ops);
+       if (IS_ERR(d_phy->usb20_phy)) {
+               dev_err(dev, "Failed to create usb20 phy\n");
+               return PTR_ERR(d_phy->usb20_phy);
+       }
+
+       platform_set_drvdata(pdev, d_phy);
+       phy_set_drvdata(d_phy->usb11_phy, d_phy);
+       phy_set_drvdata(d_phy->usb20_phy, d_phy);
+
+       if (node) {
+               d_phy->phy_provider = devm_of_phy_provider_register(dev,
+                                                       da8xx_usb_phy_of_xlate);
+               if (IS_ERR(d_phy->phy_provider)) {
+                       dev_err(dev, "Failed to create phy provider\n");
+                       return PTR_ERR(d_phy->phy_provider);
+               }
+       } else {
+               int ret;
+
+               ret = phy_create_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+               if (ret)
+                       dev_warn(dev, "Failed to create usb11 phy lookup\n");
+               ret = phy_create_lookup(d_phy->usb20_phy, "usb-phy",
+                                       "musb-da8xx");
+               if (ret)
+                       dev_warn(dev, "Failed to create usb20 phy lookup\n");
+       }
+
+       return 0;
+}
+
+static int da8xx_usb_phy_remove(struct platform_device *pdev)
+{
+       struct da8xx_usb_phy *d_phy = platform_get_drvdata(pdev);
+
+       if (!pdev->dev.of_node) {
+               phy_remove_lookup(d_phy->usb20_phy, "usb-phy", "musb-da8xx");
+               phy_remove_lookup(d_phy->usb11_phy, "usb-phy", "ohci.0");
+       }
+
+       return 0;
+}
+
+static const struct of_device_id da8xx_usb_phy_ids[] = {
+       { .compatible = "ti,da830-usb-phy" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, da8xx_usb_phy_ids);
+
+static struct platform_driver da8xx_usb_phy_driver = {
+       .probe  = da8xx_usb_phy_probe,
+       .remove = da8xx_usb_phy_remove,
+       .driver = {
+               .name   = "da8xx-usb-phy",
+               .of_match_table = da8xx_usb_phy_ids,
+       },
+};
+
+module_platform_driver(da8xx_usb_phy_driver);
+
+MODULE_ALIAS("platform:da8xx-usb-phy");
+MODULE_AUTHOR("David Lechner <david@lechnology.com>");
+MODULE_DESCRIPTION("TI DA8xx USB PHY driver");
+MODULE_LICENSE("GPL v2");
index 56631e77c11d7fb2ebd5e978869797d910b5daaf..6ee51490f78603cb52d361b7d8b9258b4db350a3 100644 (file)
@@ -140,7 +140,6 @@ static int ufs_qcom_phy_qmp_14nm_probe(struct platform_device *pdev)
 
        phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
        if (!phy) {
-               dev_err(dev, "%s: failed to allocate phy\n", __func__);
                err = -ENOMEM;
                goto out;
        }
index b16ea77d07b923c1a29f57def315bcdc032a9d40..770087ab05e29264d797231eb64e3090fb343142 100644 (file)
@@ -196,7 +196,6 @@ static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
 
        phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
        if (!phy) {
-               dev_err(dev, "%s: failed to allocate phy\n", __func__);
                err = -ENOMEM;
                goto out;
        }
index 4be3f5dbbc9f1d8b9dd416e8b930cb24b6ee3919..31156c9c4707e81807f86e9e053dba25174c0b51 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
 
 /******* USB2.0 Host registers (original offset is +0x200) *******/
 #define USB2_INT_ENABLE                0x000
@@ -81,9 +82,25 @@ struct rcar_gen3_chan {
        struct extcon_dev *extcon;
        struct phy *phy;
        struct regulator *vbus;
+       struct work_struct work;
+       bool extcon_host;
        bool has_otg;
 };
 
+static void rcar_gen3_phy_usb2_work(struct work_struct *work)
+{
+       struct rcar_gen3_chan *ch = container_of(work, struct rcar_gen3_chan,
+                                                work);
+
+       if (ch->extcon_host) {
+               extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, true);
+               extcon_set_cable_state_(ch->extcon, EXTCON_USB, false);
+       } else {
+               extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, false);
+               extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
+       }
+}
+
 static void rcar_gen3_set_host_mode(struct rcar_gen3_chan *ch, int host)
 {
        void __iomem *usb2_base = ch->base;
@@ -130,8 +147,8 @@ static void rcar_gen3_init_for_host(struct rcar_gen3_chan *ch)
        rcar_gen3_set_host_mode(ch, 1);
        rcar_gen3_enable_vbus_ctrl(ch, 1);
 
-       extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, true);
-       extcon_set_cable_state_(ch->extcon, EXTCON_USB, false);
+       ch->extcon_host = true;
+       schedule_work(&ch->work);
 }
 
 static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
@@ -140,8 +157,8 @@ static void rcar_gen3_init_for_peri(struct rcar_gen3_chan *ch)
        rcar_gen3_set_host_mode(ch, 0);
        rcar_gen3_enable_vbus_ctrl(ch, 0);
 
-       extcon_set_cable_state_(ch->extcon, EXTCON_USB_HOST, false);
-       extcon_set_cable_state_(ch->extcon, EXTCON_USB, true);
+       ch->extcon_host = false;
+       schedule_work(&ch->work);
 }
 
 static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
@@ -301,6 +318,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
        if (irq >= 0) {
                int ret;
 
+               INIT_WORK(&channel->work, rcar_gen3_phy_usb2_work);
                irq = devm_request_irq(dev, irq, rcar_gen3_phy_usb2_irq,
                                       IRQF_SHARED, dev_name(dev), channel);
                if (irq < 0)
index d60b149cff0fa7f44c28bea07d55579f9efc80d7..2a7381f4fe4c81598188a14e7a9bbcd76f0fed16 100644 (file)
@@ -236,9 +236,10 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
                        goto err_clk_prov;
        }
 
-       err = devm_add_action(base->dev, rockchip_usb_phy_action, rk_phy);
+       err = devm_add_action_or_reset(base->dev, rockchip_usb_phy_action,
+                                      rk_phy);
        if (err)
-               goto err_devm_action;
+               return err;
 
        rk_phy->phy = devm_phy_create(base->dev, child, &ops);
        if (IS_ERR(rk_phy->phy)) {
@@ -256,9 +257,6 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
        else
                return rockchip_usb_phy_power(rk_phy, 1);
 
-err_devm_action:
-       if (!rk_phy->uart_enabled)
-               of_clk_del_provider(child);
 err_clk_prov:
        if (!rk_phy->uart_enabled)
                clk_unregister(rk_phy->clk480m);
@@ -397,8 +395,13 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
        phy_base->pdata = match->data;
 
        phy_base->dev = dev;
-       phy_base->reg_base = syscon_regmap_lookup_by_phandle(dev->of_node,
-                                                            "rockchip,grf");
+       phy_base->reg_base = ERR_PTR(-ENODEV);
+       if (dev->parent && dev->parent->of_node)
+               phy_base->reg_base = syscon_node_to_regmap(
+                                               dev->parent->of_node);
+       if (IS_ERR(phy_base->reg_base))
+               phy_base->reg_base = syscon_regmap_lookup_by_phandle(
+                                               dev->of_node, "rockchip,grf");
        if (IS_ERR(phy_base->reg_base)) {
                dev_err(&pdev->dev, "Missing rockchip,grf property\n");
                return PTR_ERR(phy_base->reg_base);
@@ -463,7 +466,11 @@ static int __init rockchip_init_usb_uart(void)
                return -ENOTSUPP;
        }
 
-       grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+       grf = ERR_PTR(-ENODEV);
+       if (np->parent)
+               grf = syscon_node_to_regmap(np->parent);
+       if (IS_ERR(grf))
+               grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
        if (IS_ERR(grf)) {
                pr_err("%s: Missing rockchip,grf property, %lu\n",
                       __func__, PTR_ERR(grf));
index de3101fbbf40e1f3c77e7a1f22c6929c2db8fb01..0a45bc6088aeb14e3da89b1240d95173019e20f9 100644 (file)
@@ -94,6 +94,7 @@
 
 enum sun4i_usb_phy_type {
        sun4i_a10_phy,
+       sun6i_a31_phy,
        sun8i_a33_phy,
        sun8i_h3_phy,
 };
@@ -122,7 +123,6 @@ struct sun4i_usb_phy_data {
        /* phy0 / otg related variables */
        struct extcon_dev *extcon;
        bool phy0_init;
-       bool phy0_poll;
        struct gpio_desc *id_det_gpio;
        struct gpio_desc *vbus_det_gpio;
        struct power_supply *vbus_power_supply;
@@ -343,6 +343,24 @@ static bool sun4i_usb_phy0_have_vbus_det(struct sun4i_usb_phy_data *data)
        return data->vbus_det_gpio || data->vbus_power_supply;
 }
 
+static bool sun4i_usb_phy0_poll(struct sun4i_usb_phy_data *data)
+{
+       if ((data->id_det_gpio && data->id_det_irq <= 0) ||
+           (data->vbus_det_gpio && data->vbus_det_irq <= 0))
+               return true;
+
+       /*
+        * The A31 companion pmic (axp221) does not generate vbus change
+        * interrupts when the board is driving vbus, so we must poll
+        * when using the pmic for vbus-det _and_ we're driving vbus.
+        */
+       if (data->cfg->type == sun6i_a31_phy &&
+           data->vbus_power_supply && data->phys[0].regulator_on)
+               return true;
+
+       return false;
+}
+
 static int sun4i_usb_phy_power_on(struct phy *_phy)
 {
        struct sun4i_usb_phy *phy = phy_get_drvdata(_phy);
@@ -364,7 +382,7 @@ static int sun4i_usb_phy_power_on(struct phy *_phy)
        phy->regulator_on = true;
 
        /* We must report Vbus high within OTG_TIME_A_WAIT_VRISE msec. */
-       if (phy->index == 0 && data->vbus_det_gpio && data->phy0_poll)
+       if (phy->index == 0 && sun4i_usb_phy0_poll(data))
                mod_delayed_work(system_wq, &data->detect, DEBOUNCE_TIME);
 
        return 0;
@@ -385,7 +403,7 @@ static int sun4i_usb_phy_power_off(struct phy *_phy)
         * phy0 vbus typically slowly discharges, sometimes this causes the
         * Vbus gpio to not trigger an edge irq on Vbus off, so force a rescan.
         */
-       if (phy->index == 0 && data->vbus_det_gpio && !data->phy0_poll)
+       if (phy->index == 0 && !sun4i_usb_phy0_poll(data))
                mod_delayed_work(system_wq, &data->detect, POLL_TIME);
 
        return 0;
@@ -468,7 +486,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
        if (vbus_notify)
                extcon_set_cable_state_(data->extcon, EXTCON_USB, vbus_det);
 
-       if (data->phy0_poll)
+       if (sun4i_usb_phy0_poll(data))
                queue_delayed_work(system_wq, &data->detect, POLL_TIME);
 }
 
@@ -644,11 +662,6 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
        }
 
        data->id_det_irq = gpiod_to_irq(data->id_det_gpio);
-       data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
-       if ((data->id_det_gpio && data->id_det_irq <= 0) ||
-           (data->vbus_det_gpio && data->vbus_det_irq <= 0))
-               data->phy0_poll = true;
-
        if (data->id_det_irq > 0) {
                ret = devm_request_irq(dev, data->id_det_irq,
                                sun4i_usb_phy0_id_vbus_det_irq,
@@ -660,6 +673,7 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
                }
        }
 
+       data->vbus_det_irq = gpiod_to_irq(data->vbus_det_gpio);
        if (data->vbus_det_irq > 0) {
                ret = devm_request_irq(dev, data->vbus_det_irq,
                                sun4i_usb_phy0_id_vbus_det_irq,
@@ -711,7 +725,7 @@ static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = {
 
 static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
        .num_phys = 3,
-       .type = sun4i_a10_phy,
+       .type = sun6i_a31_phy,
        .disc_thresh = 3,
        .phyctl_offset = REG_PHYCTL_A10,
        .dedicated_clocks = true,
index 385362e5b2f6fbee91a14ff9ba2afb9b14edcb7c..ae266e0c8368b1bb8595f50ff748daf6fc70f140 100644 (file)
@@ -518,7 +518,7 @@ enum clk_type_t {
        CLK_INT_SING = 2,       /* Internal single ended */
 };
 
-enum phy_mode {
+enum xgene_phy_mode {
        MODE_SATA       = 0,    /* List them for simple reference */
        MODE_SGMII      = 1,
        MODE_PCIE       = 2,
@@ -542,7 +542,7 @@ struct xgene_sata_override_param {
 struct xgene_phy_ctx {
        struct device *dev;
        struct phy *phy;
-       enum phy_mode mode;             /* Mode of operation */
+       enum xgene_phy_mode mode;               /* Mode of operation */
        enum clk_type_t clk_type;       /* Input clock selection */
        void __iomem *sds_base;         /* PHY CSR base addr */
        struct clk *clk;                /* Optional clock */
index 6d8ee3b1587276494274617d65029534df1d1af6..8abd80dbcbed7974b4ace4265dd9dad1bca89edd 100644 (file)
@@ -151,13 +151,19 @@ static long ec_device_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
                goto exit;
        }
 
+       if (u_cmd.outsize != s_cmd->outsize ||
+           u_cmd.insize != s_cmd->insize) {
+               ret = -EINVAL;
+               goto exit;
+       }
+
        s_cmd->command += ec->cmd_offset;
        ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
        /* Only copy data to userland if data was received. */
        if (ret < 0)
                goto exit;
 
-       if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + u_cmd.insize))
+       if (copy_to_user(arg, s_cmd, sizeof(*s_cmd) + s_cmd->insize))
                ret = -EFAULT;
 exit:
        kfree(s_cmd);
index 2776bec89c88ca3bcc012158d7a39eb7960f4b87..e57f923eea0b6a511f851a0d9413787edc7309d3 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/seq_file.h>
 
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/pmc_core.h>
 
 #include "intel_pmc_core.h"
@@ -138,10 +139,10 @@ static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
 #endif /* CONFIG_DEBUG_FS */
 
 static const struct x86_cpu_id intel_pmc_core_ids[] = {
-       { X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT,
-               (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
-       { X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT,
-               (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_MOBILE, X86_FEATURE_MWAIT,
+               (kernel_ulong_t)NULL},
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP, X86_FEATURE_MWAIT,
+               (kernel_ulong_t)NULL},
        {}
 };
 
index f5134acd6ff05d8a5eac080eed9f4b26a96847f3..815a7c5e9566f0f4f2237c08431a16f20438a97f 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/suspend.h>
 
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/intel_pmc_ipc.h>
 #include <asm/intel_punit_ipc.h>
 #include <asm/intel_telemetry.h>
@@ -331,7 +332,7 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = {
 };
 
 static const struct x86_cpu_id telemetry_debugfs_cpu_ids[] = {
-       TELEM_DEBUGFS_CPU(0x5c, telem_apl_debugfs_conf),
+       TELEM_DEBUGFS_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_debugfs_conf),
        {}
 };
 
index 09c84a2b1c2cef7ecbeaf19795dfdd2048c5c66e..6d884f7d1b9f4d950d2bf5799a688a5d978e893f 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/intel_pmc_ipc.h>
 #include <asm/intel_punit_ipc.h>
 #include <asm/intel_telemetry.h>
@@ -163,7 +164,7 @@ static struct telemetry_plt_config telem_apl_config = {
 };
 
 static const struct x86_cpu_id telemetry_cpu_ids[] = {
-       TELEM_CPU(0x5c, telem_apl_config),
+       TELEM_CPU(INTEL_FAM6_ATOM_GOLDMONT, telem_apl_config),
        {}
 };
 
index 5edee645d890e6037578da6f26aa0be5daaf61d0..262285e48a09481807940bea4aa4e53e45f9bb3d 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/isapnp.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 extern struct pnp_protocol isapnp_protocol;
 
index 421770ddafa3c5ceb9f38e8cb35c2453b3dd2312..0f11a0f4c369f36822929a6182d6f7af7caf9a77 100644 (file)
@@ -309,6 +309,7 @@ config BATTERY_RX51
 config CHARGER_ISP1704
        tristate "ISP1704 USB Charger Detection"
        depends on USB_PHY
+       depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
        help
          Say Y to enable support for USB Charger Detection with
          ISP1707/ISP1704 USB transceivers.
index e4d569f57acc90d1dd02840b5f2f9f4d0e4ba152..4030eeb7cf65a885c3d5a7be1f68ae984cbd6c04 100644 (file)
 
 #define AXP288_EXTCON_DEV_NAME         "axp288_extcon"
 
-#define AXP288_EXTCON_SLOW_CHARGER             "SLOW-CHARGER"
-#define AXP288_EXTCON_DOWNSTREAM_CHARGER       "CHARGE-DOWNSTREAM"
-#define AXP288_EXTCON_FAST_CHARGER             "FAST-CHARGER"
-
 enum {
        VBUS_OV_IRQ = 0,
        CHARGE_DONE_IRQ,
@@ -158,7 +154,7 @@ struct axp288_chrg_info {
        /* OTG/Host mode */
        struct {
                struct work_struct work;
-               struct extcon_specific_cable_nb cable;
+               struct extcon_dev *cable;
                struct notifier_block id_nb;
                bool id_short;
        } otg;
@@ -586,17 +582,15 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work)
        bool old_connected = info->cable.connected;
 
        /* Determine cable/charger type */
-       if (extcon_get_cable_state(edev, AXP288_EXTCON_SLOW_CHARGER) > 0) {
+       if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_SDP) > 0) {
                dev_dbg(&info->pdev->dev, "USB SDP charger  is connected");
                info->cable.connected = true;
                info->cable.chg_type = POWER_SUPPLY_TYPE_USB;
-       } else if (extcon_get_cable_state(edev,
-                               AXP288_EXTCON_DOWNSTREAM_CHARGER) > 0) {
+       } else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_CDP) > 0) {
                dev_dbg(&info->pdev->dev, "USB CDP charger is connected");
                info->cable.connected = true;
                info->cable.chg_type = POWER_SUPPLY_TYPE_USB_CDP;
-       } else if (extcon_get_cable_state(edev,
-                                       AXP288_EXTCON_FAST_CHARGER) > 0) {
+       } else if (extcon_get_cable_state_(edev, EXTCON_CHG_USB_DCP) > 0) {
                dev_dbg(&info->pdev->dev, "USB DCP charger is connected");
                info->cable.connected = true;
                info->cable.chg_type = POWER_SUPPLY_TYPE_USB_DCP;
@@ -692,8 +686,8 @@ static int axp288_charger_handle_otg_evt(struct notifier_block *nb,
 {
        struct axp288_chrg_info *info =
            container_of(nb, struct axp288_chrg_info, otg.id_nb);
-       struct extcon_dev *edev = param;
-       int usb_host = extcon_get_cable_state(edev, "USB-Host");
+       struct extcon_dev *edev = info->otg.cable;
+       int usb_host = extcon_get_cable_state_(edev, EXTCON_USB_HOST);
 
        dev_dbg(&info->pdev->dev, "external connector USB-Host is %s\n",
                                usb_host ? "attached" : "detached");
@@ -848,10 +842,33 @@ static int axp288_charger_probe(struct platform_device *pdev)
        /* Register for extcon notification */
        INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
        info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
-       ret = extcon_register_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
+       ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
+                                       &info->cable.nb);
+       if (ret) {
+               dev_err(&info->pdev->dev,
+                       "failed to register extcon notifier for SDP %d\n", ret);
+               return ret;
+       }
+
+       ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
+                                       &info->cable.nb);
+       if (ret) {
+               dev_err(&info->pdev->dev,
+                       "failed to register extcon notifier for CDP %d\n", ret);
+               extcon_unregister_notifier(info->cable.edev,
+                               EXTCON_CHG_USB_SDP, &info->cable.nb);
+               return ret;
+       }
+
+       ret = extcon_register_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
+                                       &info->cable.nb);
        if (ret) {
                dev_err(&info->pdev->dev,
-                       "failed to register extcon notifier %d\n", ret);
+                       "failed to register extcon notifier for DCP %d\n", ret);
+               extcon_unregister_notifier(info->cable.edev,
+                               EXTCON_CHG_USB_SDP, &info->cable.nb);
+               extcon_unregister_notifier(info->cable.edev,
+                               EXTCON_CHG_USB_CDP, &info->cable.nb);
                return ret;
        }
 
@@ -871,14 +888,14 @@ static int axp288_charger_probe(struct platform_device *pdev)
        /* Register for OTG notification */
        INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
        info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
-       ret = extcon_register_interest(&info->otg.cable, NULL, "USB-Host",
+       ret = extcon_register_notifier(info->otg.cable, EXTCON_USB_HOST,
                                       &info->otg.id_nb);
        if (ret)
                dev_warn(&pdev->dev, "failed to register otg notifier\n");
 
-       if (info->otg.cable.edev)
-               info->otg.id_short = extcon_get_cable_state(
-                                       info->otg.cable.edev, "USB-Host");
+       if (info->otg.cable)
+               info->otg.id_short = extcon_get_cable_state_(
+                                       info->otg.cable, EXTCON_USB_HOST);
 
        /* Register charger interrupts */
        for (i = 0; i < CHRG_INTR_END; i++) {
@@ -905,11 +922,17 @@ static int axp288_charger_probe(struct platform_device *pdev)
        return 0;
 
 intr_reg_failed:
-       if (info->otg.cable.edev)
-               extcon_unregister_interest(&info->otg.cable);
+       if (info->otg.cable)
+               extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
+                                       &info->otg.id_nb);
        power_supply_unregister(info->psy_usb);
 psy_reg_failed:
-       extcon_unregister_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
+       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
+                                       &info->cable.nb);
+       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
+                                       &info->cable.nb);
+       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
+                                       &info->cable.nb);
        return ret;
 }
 
@@ -917,10 +940,16 @@ static int axp288_charger_remove(struct platform_device *pdev)
 {
        struct axp288_chrg_info *info =  dev_get_drvdata(&pdev->dev);
 
-       if (info->otg.cable.edev)
-               extcon_unregister_interest(&info->otg.cable);
+       if (info->otg.cable)
+               extcon_unregister_notifier(info->otg.cable, EXTCON_USB_HOST,
+                                       &info->otg.id_nb);
 
-       extcon_unregister_notifier(info->cable.edev, EXTCON_NONE, &info->cable.nb);
+       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_SDP,
+                                       &info->cable.nb);
+       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_CDP,
+                                       &info->cable.nb);
+       extcon_unregister_notifier(info->cable.edev, EXTCON_CHG_USB_DCP,
+                                       &info->cable.nb);
        power_supply_unregister(info->psy_usb);
 
        return 0;
index 45f6ebf88df6b7962f288a599ca8363d2131923d..e90b3f307e0f21aec47ee3bcd2e0a113666acbe6 100644 (file)
@@ -735,11 +735,8 @@ static void bq27xxx_battery_poll(struct work_struct *work)
 
        bq27xxx_battery_update(di);
 
-       if (poll_interval > 0) {
-               /* The timer does not have to be accurate. */
-               set_timer_slack(&di->work.timer, poll_interval * HZ / 4);
+       if (poll_interval > 0)
                schedule_delayed_work(&di->work, poll_interval * HZ);
-       }
 }
 
 /*
index b2766b867b0eb692dd30d7a1987567b5688077c9..2e8f2be5b6f909a5f26caab3c7629836516f35bc 100644 (file)
@@ -33,6 +33,7 @@
 
 #include <asm/processor.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 
 /* Local defines */
 #define MSR_PLATFORM_POWER_LIMIT       0x0000065C
@@ -1096,27 +1097,34 @@ static const struct rapl_defaults rapl_defaults_cht = {
                }
 
 static const struct x86_cpu_id rapl_ids[] __initconst = {
-       RAPL_CPU(0x2a, rapl_defaults_core),/* Sandy Bridge */
-       RAPL_CPU(0x2d, rapl_defaults_core),/* Sandy Bridge EP */
-       RAPL_CPU(0x37, rapl_defaults_byt),/* Valleyview */
-       RAPL_CPU(0x3a, rapl_defaults_core),/* Ivy Bridge */
-       RAPL_CPU(0x3c, rapl_defaults_core),/* Haswell */
-       RAPL_CPU(0x3d, rapl_defaults_core),/* Broadwell */
-       RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */
-       RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */
-       RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
-       RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */
-       RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */
-       RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */
-       RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */
-       RAPL_CPU(0x4A, rapl_defaults_tng),/* Tangier */
-       RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
-       RAPL_CPU(0x5A, rapl_defaults_ann),/* Annidale */
-       RAPL_CPU(0X5C, rapl_defaults_core),/* Broxton */
-       RAPL_CPU(0x5E, rapl_defaults_core),/* Skylake-H/S */
-       RAPL_CPU(0x57, rapl_defaults_hsw_server),/* Knights Landing */
-       RAPL_CPU(0x8E, rapl_defaults_core),/* Kabylake */
-       RAPL_CPU(0x9E, rapl_defaults_core),/* Kabylake */
+       RAPL_CPU(INTEL_FAM6_SANDYBRIDGE,        rapl_defaults_core),
+       RAPL_CPU(INTEL_FAM6_SANDYBRIDGE_X,      rapl_defaults_core),
+
+       RAPL_CPU(INTEL_FAM6_IVYBRIDGE,          rapl_defaults_core),
+
+       RAPL_CPU(INTEL_FAM6_HASWELL_CORE,       rapl_defaults_core),
+       RAPL_CPU(INTEL_FAM6_HASWELL_ULT,        rapl_defaults_core),
+       RAPL_CPU(INTEL_FAM6_HASWELL_GT3E,       rapl_defaults_core),
+       RAPL_CPU(INTEL_FAM6_HASWELL_X,          rapl_defaults_hsw_server),
+
+       RAPL_CPU(INTEL_FAM6_BROADWELL_CORE,     rapl_defaults_core),
+       RAPL_CPU(INTEL_FAM6_BROADWELL_GT3E,     rapl_defaults_core),
+       RAPL_CPU(INTEL_FAM6_BROADWELL_XEON_D,   rapl_defaults_core),
+       RAPL_CPU(INTEL_FAM6_BROADWELL_X,        rapl_defaults_hsw_server),
+
+       RAPL_CPU(INTEL_FAM6_SKYLAKE_DESKTOP,    rapl_defaults_core),
+       RAPL_CPU(INTEL_FAM6_SKYLAKE_MOBILE,     rapl_defaults_core),
+       RAPL_CPU(INTEL_FAM6_SKYLAKE_X,          rapl_defaults_hsw_server),
+       RAPL_CPU(INTEL_FAM6_KABYLAKE_MOBILE,    rapl_defaults_core),
+       RAPL_CPU(INTEL_FAM6_KABYLAKE_DESKTOP,   rapl_defaults_core),
+
+       RAPL_CPU(INTEL_FAM6_ATOM_SILVERMONT1,   rapl_defaults_byt),
+       RAPL_CPU(INTEL_FAM6_ATOM_AIRMONT,       rapl_defaults_cht),
+       RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD1,   rapl_defaults_tng),
+       RAPL_CPU(INTEL_FAM6_ATOM_MERRIFIELD2,   rapl_defaults_ann),
+       RAPL_CPU(INTEL_FAM6_ATOM_GOLDMONT,      rapl_defaults_core),
+
+       RAPL_CPU(INTEL_FAM6_XEON_PHI_KNL,       rapl_defaults_hsw_server),
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
index 38a8bbe7481008232e7336777850d683afb765be..83797d89c30fe88665c5d7214c391a0a571cfe88 100644 (file)
@@ -195,7 +195,7 @@ static void parport_detach(struct parport *port)
        struct pps_client_pp *device;
 
        /* FIXME: oooh, this is ugly! */
-       if (strcmp(pardev->name, KBUILD_MODNAME))
+       if (!pardev || strcmp(pardev->name, KBUILD_MODNAME))
                /* not our port */
                return;
 
index 526bf23dcb49a543032c6ac17da76c2e09009545..6c7fe4778793758478d9a27e3f9d11f6d4b4a1b2 100644 (file)
@@ -152,7 +152,6 @@ static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
        .enable = rpm_reg_enable,
        .disable = rpm_reg_disable,
        .is_enabled = rpm_reg_is_enabled,
-       .list_voltage = regulator_list_voltage_linear_range,
 
        .get_voltage = rpm_reg_get_voltage,
        .set_voltage = rpm_reg_set_voltage,
index 80b1979e8d955f6022e7810a4b65a62f13ec1224..df036b872b050b835d0fea6f620726dc8876225b 100644 (file)
@@ -1051,6 +1051,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
                qeth_l2_set_offline(cgdev);
 
        if (card->dev) {
+               netif_napi_del(&card->napi);
                unregister_netdev(card->dev);
                card->dev = NULL;
        }
index ac544330daeb7cffaccc37306f041deffd1475bb..709b52339ff9a5907812d315e65bca964ddd11be 100644 (file)
@@ -3226,6 +3226,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
                qeth_l3_set_offline(cgdev);
 
        if (card->dev) {
+               netif_napi_del(&card->napi);
                unregister_netdev(card->dev);
                card->dev = NULL;
        }
index d6a691e27d33317c72d785340b5b371378932741..d6803a9e5ab8a49ea045bcbfafce7029ef439c5d 100644 (file)
@@ -10093,6 +10093,7 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
                ioa_cfg->intr_flag = IPR_USE_MSI;
        else {
                ioa_cfg->intr_flag = IPR_USE_LSI;
+               ioa_cfg->clear_isr = 1;
                ioa_cfg->nvectors = 1;
                dev_info(&pdev->dev, "Cannot enable MSI.\n");
        }
index 5649c200d37ce12f80fc49ad860a185c1a52af5f..a92a62dea7934429e48c252f77405a8dd70b6400 100644 (file)
@@ -2548,7 +2548,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
        if (!vha->flags.online)
                return;
 
-       if (rsp->msix->cpuid != smp_processor_id()) {
+       if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
                /* if kernel does not notify qla of IRQ's CPU change,
                 * then set it here.
                 */
index ff41c310c900a5760bf33423a5db4362c5dfc25b..eaccd651ccda0d239af91ebfb6dfdbd97ac340e3 100644 (file)
@@ -429,7 +429,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
         * here, and we don't know what device it is
         * trying to work with, leave it as-is.
         */
-       vmax = 8;       /* max length of vendor */
+       vmax = sizeof(devinfo->vendor);
        vskip = vendor;
        while (vmax > 0 && *vskip == ' ') {
                vmax--;
@@ -439,7 +439,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
        while (vmax > 0 && vskip[vmax - 1] == ' ')
                --vmax;
 
-       mmax = 16;      /* max length of model */
+       mmax = sizeof(devinfo->model);
        mskip = model;
        while (mmax > 0 && *mskip == ' ') {
                mmax--;
@@ -455,10 +455,12 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor,
                         * Behave like the older version of get_device_flags.
                         */
                        if (memcmp(devinfo->vendor, vskip, vmax) ||
-                                       devinfo->vendor[vmax])
+                                       (vmax < sizeof(devinfo->vendor) &&
+                                               devinfo->vendor[vmax]))
                                continue;
                        if (memcmp(devinfo->model, mskip, mmax) ||
-                                       devinfo->model[mmax])
+                                       (mmax < sizeof(devinfo->model) &&
+                                               devinfo->model[mmax]))
                                continue;
                        return devinfo;
                } else {
index a9bac3bf20de14e541a22608fcb1542cad0f6d5f..c887ecdaf19b7c10fd8839a52993ca1f16141d84 100644 (file)
@@ -34,15 +34,6 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
 
 static int __init sh_pm_runtime_init(void)
 {
-       if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
-               if (!of_find_compatible_node(NULL, NULL,
-                                            "renesas,cpg-mstp-clocks"))
-                       return 0;
-               if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS_OF) &&
-                   of_find_node_with_property(NULL, "#power-domain-cells"))
-                       return 0;
-       }
-
        pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
        return 0;
 }
index 7c197d1a123169c48713f298d2abb84f5c2c96c5..af9476460023a248b3349765c50e2f51772d65ab 100644 (file)
@@ -102,4 +102,6 @@ source "drivers/staging/most/Kconfig"
 
 source "drivers/staging/i4l/Kconfig"
 
+source "drivers/staging/ks7010/Kconfig"
+
 endif # STAGING
index a470c72761422ba9f314207ead2e922b16e02861..9f6009dcafa85b45e066c29b42c9ca2ae1ff57d0 100644 (file)
@@ -40,3 +40,4 @@ obj-$(CONFIG_FSL_MC_BUS)      += fsl-mc/
 obj-$(CONFIG_WILC1000)         += wilc1000/
 obj-$(CONFIG_MOST)             += most/
 obj-$(CONFIG_ISDN_I4L)         += i4l/
+obj-$(CONFIG_KS7010)           += ks7010/
index 6480f60ebf6c4a4d2be86ee798d6b304f4b8ceea..06e41d24ec62053c7f698b72ba4d6ca12f6e0f32 100644 (file)
@@ -24,26 +24,19 @@ config ANDROID_LOW_MEMORY_KILLER
          scripts (/init.rc), and it defines priority values with minimum free memory size
          for each priority.
 
-config SYNC
-       bool "Synchronization framework"
-       default n
-       select ANON_INODES
-       select DMA_SHARED_BUFFER
-       ---help---
-         This option enables the framework for synchronization between multiple
-         drivers.  Sync implementations can take advantage of hardware
-         synchronization built into devices like GPUs.
-
 config SW_SYNC
-       bool "Software synchronization objects"
+       bool "Software synchronization framework"
        default n
-       depends on SYNC
        depends on SYNC_FILE
+       depends on DEBUG_FS
        ---help---
          A sync object driver that uses a 32bit counter to coordinate
          synchronization.  Useful when there is no hardware primitive backing
          the synchronization.
 
+         WARNING: improper use of this can result in deadlocking kernel
+         drivers from userspace. Intended for test and debug only.
+
 source "drivers/staging/android/ion/Kconfig"
 
 endif # if ANDROID
index 980d6dc4b26558614fcb2de03f948eb34641ed49..7ca61b77a8d44f2cc95b02066e5d2f1fb33791f6 100644 (file)
@@ -4,5 +4,4 @@ obj-y                                   += ion/
 
 obj-$(CONFIG_ASHMEM)                   += ashmem.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)        += lowmemorykiller.o
-obj-$(CONFIG_SYNC)                     += sync.o sync_debug.o
-obj-$(CONFIG_SW_SYNC)                  += sw_sync.o
+obj-$(CONFIG_SW_SYNC)                  += sw_sync.o sync_debug.o
index af39ff58fa33134ad17b2a19294a30d0615bbe26..115c9174705f1b1bd2fe34dac727df54c1b9b75b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * drivers/base/sw_sync.c
+ * drivers/dma-buf/sw_sync.c
  *
  * Copyright (C) 2012 Google, Inc.
  *
  *
  */
 
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
 #include <linux/file.h>
 #include <linux/fs.h>
-#include <linux/miscdevice.h>
-#include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
 
-#include "sw_sync.h"
+#include "sync_debug.h"
 
-struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value)
+#define CREATE_TRACE_POINTS
+#include "trace/sync.h"
+
+struct sw_sync_create_fence_data {
+       __u32   value;
+       char    name[32];
+       __s32   fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC      'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE       _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+               struct sw_sync_create_fence_data)
+#define SW_SYNC_IOC_INC                        _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+static const struct fence_ops timeline_fence_ops;
+
+static inline struct sync_pt *fence_to_sync_pt(struct fence *fence)
+{
+       if (fence->ops != &timeline_fence_ops)
+               return NULL;
+       return container_of(fence, struct sync_pt, base);
+}
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @name:      sync_timeline name
+ *
+ * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
+ * case of error.
+ */
+struct sync_timeline *sync_timeline_create(const char *name)
+{
+       struct sync_timeline *obj;
+
+       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+       if (!obj)
+               return NULL;
+
+       kref_init(&obj->kref);
+       obj->context = fence_context_alloc(1);
+       strlcpy(obj->name, name, sizeof(obj->name));
+
+       INIT_LIST_HEAD(&obj->child_list_head);
+       INIT_LIST_HEAD(&obj->active_list_head);
+       spin_lock_init(&obj->child_list_lock);
+
+       sync_timeline_debug_add(obj);
+
+       return obj;
+}
+
+static void sync_timeline_free(struct kref *kref)
+{
+       struct sync_timeline *obj =
+               container_of(kref, struct sync_timeline, kref);
+
+       sync_timeline_debug_remove(obj);
+
+       kfree(obj);
+}
+
+static void sync_timeline_get(struct sync_timeline *obj)
+{
+       kref_get(&obj->kref);
+}
+
+static void sync_timeline_put(struct sync_timeline *obj)
+{
+       kref_put(&obj->kref, sync_timeline_free);
+}
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj:       sync_timeline to signal
+ * @inc:       num to increment on timeline->value
+ *
+ * A sync implementation should call this any time one of it's fences
+ * has signaled or has an error condition.
+ */
+static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+{
+       unsigned long flags;
+       struct sync_pt *pt, *next;
+
+       trace_sync_timeline(obj);
+
+       spin_lock_irqsave(&obj->child_list_lock, flags);
+
+       obj->value += inc;
+
+       list_for_each_entry_safe(pt, next, &obj->active_list_head,
+                                active_list) {
+               if (fence_is_signaled_locked(&pt->base))
+                       list_del_init(&pt->active_list);
+       }
+
+       spin_unlock_irqrestore(&obj->child_list_lock, flags);
+}
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @parent:    fence's parent sync_timeline
+ * @size:      size to allocate for this pt
+ * @inc:       value of the fence
+ *
+ * Creates a new sync_pt as a child of @parent.  @size bytes will be
+ * allocated allowing for implementation specific data to be kept after
+ * the generic sync_timeline struct. Returns the sync_pt object or
+ * NULL in case of error.
+ */
+static struct sync_pt *sync_pt_create(struct sync_timeline *obj, int size,
+                            unsigned int value)
+{
+       unsigned long flags;
+       struct sync_pt *pt;
+
+       if (size < sizeof(*pt))
+               return NULL;
+
+       pt = kzalloc(size, GFP_KERNEL);
+       if (!pt)
+               return NULL;
+
+       spin_lock_irqsave(&obj->child_list_lock, flags);
+       sync_timeline_get(obj);
+       fence_init(&pt->base, &timeline_fence_ops, &obj->child_list_lock,
+                  obj->context, value);
+       list_add_tail(&pt->child_list, &obj->child_list_head);
+       INIT_LIST_HEAD(&pt->active_list);
+       spin_unlock_irqrestore(&obj->child_list_lock, flags);
+       return pt;
+}
+
+static const char *timeline_fence_get_driver_name(struct fence *fence)
+{
+       return "sw_sync";
+}
+
+static const char *timeline_fence_get_timeline_name(struct fence *fence)
+{
+       struct sync_timeline *parent = fence_parent(fence);
+
+       return parent->name;
+}
+
+static void timeline_fence_release(struct fence *fence)
 {
-       struct sw_sync_pt *pt;
+       struct sync_pt *pt = fence_to_sync_pt(fence);
+       struct sync_timeline *parent = fence_parent(fence);
+       unsigned long flags;
 
-       pt = (struct sw_sync_pt *)
-               sync_pt_create(&obj->obj, sizeof(struct sw_sync_pt));
+       spin_lock_irqsave(fence->lock, flags);
+       list_del(&pt->child_list);
+       if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
+               list_del(&pt->active_list);
+       spin_unlock_irqrestore(fence->lock, flags);
 
-       pt->value = value;
+       sync_timeline_put(parent);
+       fence_free(fence);
+}
+
+static bool timeline_fence_signaled(struct fence *fence)
+{
+       struct sync_timeline *parent = fence_parent(fence);
 
-       return (struct fence *)pt;
+       return (fence->seqno > parent->value) ? false : true;
 }
-EXPORT_SYMBOL(sw_sync_pt_create);
 
-static int sw_sync_fence_has_signaled(struct fence *fence)
+static bool timeline_fence_enable_signaling(struct fence *fence)
 {
-       struct sw_sync_pt *pt = (struct sw_sync_pt *)fence;
-       struct sw_sync_timeline *obj =
-               (struct sw_sync_timeline *)fence_parent(fence);
+       struct sync_pt *pt = fence_to_sync_pt(fence);
+       struct sync_timeline *parent = fence_parent(fence);
 
-       return (pt->value > obj->value) ? 0 : 1;
+       if (timeline_fence_signaled(fence))
+               return false;
+
+       list_add_tail(&pt->active_list, &parent->active_list_head);
+       return true;
 }
 
-static void sw_sync_timeline_value_str(struct sync_timeline *sync_timeline,
-                                      char *str, int size)
+static void timeline_fence_value_str(struct fence *fence,
+                                   char *str, int size)
 {
-       struct sw_sync_timeline *timeline =
-               (struct sw_sync_timeline *)sync_timeline;
-       snprintf(str, size, "%d", timeline->value);
+       snprintf(str, size, "%d", fence->seqno);
 }
 
-static void sw_sync_fence_value_str(struct fence *fence, char *str, int size)
+static void timeline_fence_timeline_value_str(struct fence *fence,
+                                            char *str, int size)
 {
-       struct sw_sync_pt *pt = (struct sw_sync_pt *)fence;
+       struct sync_timeline *parent = fence_parent(fence);
 
-       snprintf(str, size, "%d", pt->value);
+       snprintf(str, size, "%d", parent->value);
 }
 
-static struct sync_timeline_ops sw_sync_timeline_ops = {
-       .driver_name = "sw_sync",
-       .has_signaled = sw_sync_fence_has_signaled,
-       .timeline_value_str = sw_sync_timeline_value_str,
-       .fence_value_str = sw_sync_fence_value_str,
+static const struct fence_ops timeline_fence_ops = {
+       .get_driver_name = timeline_fence_get_driver_name,
+       .get_timeline_name = timeline_fence_get_timeline_name,
+       .enable_signaling = timeline_fence_enable_signaling,
+       .signaled = timeline_fence_signaled,
+       .wait = fence_default_wait,
+       .release = timeline_fence_release,
+       .fence_value_str = timeline_fence_value_str,
+       .timeline_value_str = timeline_fence_timeline_value_str,
 };
 
-struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+/*
+ * *WARNING*
+ *
+ * improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync create a new sync obj */
+static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
 {
-       struct sw_sync_timeline *obj = (struct sw_sync_timeline *)
-               sync_timeline_create(&sw_sync_timeline_ops,
-                                    sizeof(struct sw_sync_timeline),
-                                    name);
+       struct sync_timeline *obj;
+       char task_comm[TASK_COMM_LEN];
 
-       return obj;
+       get_task_comm(task_comm, current);
+
+       obj = sync_timeline_create(task_comm);
+       if (!obj)
+               return -ENOMEM;
+
+       file->private_data = obj;
+
+       return 0;
 }
-EXPORT_SYMBOL(sw_sync_timeline_create);
 
-void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
 {
-       obj->value += inc;
+       struct sync_timeline *obj = file->private_data;
+
+       smp_wmb();
+
+       sync_timeline_put(obj);
+       return 0;
+}
+
+static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
+                                      unsigned long arg)
+{
+       int fd = get_unused_fd_flags(O_CLOEXEC);
+       int err;
+       struct sync_pt *pt;
+       struct sync_file *sync_file;
+       struct sw_sync_create_fence_data data;
+
+       if (fd < 0)
+               return fd;
+
+       if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+               err = -EFAULT;
+               goto err;
+       }
+
+       pt = sync_pt_create(obj, sizeof(*pt), data.value);
+       if (!pt) {
+               err = -ENOMEM;
+               goto err;
+       }
+
+       sync_file = sync_file_create(&pt->base);
+       if (!sync_file) {
+               fence_put(&pt->base);
+               err = -ENOMEM;
+               goto err;
+       }
+
+       data.fence = fd;
+       if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+               fput(sync_file->file);
+               err = -EFAULT;
+               goto err;
+       }
+
+       fd_install(fd, sync_file->file);
+
+       return 0;
+
+err:
+       put_unused_fd(fd);
+       return err;
+}
 
-       sync_timeline_signal(&obj->obj);
+static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
+{
+       u32 value;
+
+       if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+               return -EFAULT;
+
+       sync_timeline_signal(obj, value);
+
+       return 0;
+}
+
+static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+                         unsigned long arg)
+{
+       struct sync_timeline *obj = file->private_data;
+
+       switch (cmd) {
+       case SW_SYNC_IOC_CREATE_FENCE:
+               return sw_sync_ioctl_create_fence(obj, arg);
+
+       case SW_SYNC_IOC_INC:
+               return sw_sync_ioctl_inc(obj, arg);
+
+       default:
+               return -ENOTTY;
+       }
 }
-EXPORT_SYMBOL(sw_sync_timeline_inc);
+
+const struct file_operations sw_sync_debugfs_fops = {
+       .open           = sw_sync_debugfs_open,
+       .release        = sw_sync_debugfs_release,
+       .unlocked_ioctl = sw_sync_ioctl,
+       .compat_ioctl   = sw_sync_ioctl,
+};
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
deleted file mode 100644 (file)
index e18667b..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * include/linux/sw_sync.h
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_SW_SYNC_H
-#define _LINUX_SW_SYNC_H
-
-#include <linux/types.h>
-#include <linux/kconfig.h>
-#include "sync.h"
-#include "uapi/sw_sync.h"
-
-struct sw_sync_timeline {
-       struct  sync_timeline   obj;
-
-       u32                     value;
-};
-
-struct sw_sync_pt {
-       struct fence            pt;
-
-       u32                     value;
-};
-
-#if IS_ENABLED(CONFIG_SW_SYNC)
-struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
-void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
-
-struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
-#else
-static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
-{
-       return NULL;
-}
-
-static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
-{
-}
-
-static inline struct fence *sw_sync_pt_create(struct sw_sync_timeline *obj,
-                                             u32 value)
-{
-       return NULL;
-}
-#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
-
-#endif /* _LINUX_SW_SYNC_H */
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
deleted file mode 100644 (file)
index 1d14c83..0000000
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * drivers/base/sync.c
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/anon_inodes.h>
-
-#include "sync.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace/sync.h"
-
-static const struct fence_ops android_fence_ops;
-
-struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
-                                          int size, const char *name)
-{
-       struct sync_timeline *obj;
-
-       if (size < sizeof(struct sync_timeline))
-               return NULL;
-
-       obj = kzalloc(size, GFP_KERNEL);
-       if (!obj)
-               return NULL;
-
-       kref_init(&obj->kref);
-       obj->ops = ops;
-       obj->context = fence_context_alloc(1);
-       strlcpy(obj->name, name, sizeof(obj->name));
-
-       INIT_LIST_HEAD(&obj->child_list_head);
-       INIT_LIST_HEAD(&obj->active_list_head);
-       spin_lock_init(&obj->child_list_lock);
-
-       sync_timeline_debug_add(obj);
-
-       return obj;
-}
-EXPORT_SYMBOL(sync_timeline_create);
-
-static void sync_timeline_free(struct kref *kref)
-{
-       struct sync_timeline *obj =
-               container_of(kref, struct sync_timeline, kref);
-
-       sync_timeline_debug_remove(obj);
-
-       kfree(obj);
-}
-
-static void sync_timeline_get(struct sync_timeline *obj)
-{
-       kref_get(&obj->kref);
-}
-
-static void sync_timeline_put(struct sync_timeline *obj)
-{
-       kref_put(&obj->kref, sync_timeline_free);
-}
-
-void sync_timeline_destroy(struct sync_timeline *obj)
-{
-       obj->destroyed = true;
-       /*
-        * Ensure timeline is marked as destroyed before
-        * changing timeline's fences status.
-        */
-       smp_wmb();
-
-       sync_timeline_put(obj);
-}
-EXPORT_SYMBOL(sync_timeline_destroy);
-
-void sync_timeline_signal(struct sync_timeline *obj)
-{
-       unsigned long flags;
-       struct fence *fence, *next;
-
-       trace_sync_timeline(obj);
-
-       spin_lock_irqsave(&obj->child_list_lock, flags);
-
-       list_for_each_entry_safe(fence, next, &obj->active_list_head,
-                                active_list) {
-               if (fence_is_signaled_locked(fence))
-                       list_del_init(&fence->active_list);
-       }
-
-       spin_unlock_irqrestore(&obj->child_list_lock, flags);
-}
-EXPORT_SYMBOL(sync_timeline_signal);
-
-struct fence *sync_pt_create(struct sync_timeline *obj, int size)
-{
-       unsigned long flags;
-       struct fence *fence;
-
-       if (size < sizeof(*fence))
-               return NULL;
-
-       fence = kzalloc(size, GFP_KERNEL);
-       if (!fence)
-               return NULL;
-
-       spin_lock_irqsave(&obj->child_list_lock, flags);
-       sync_timeline_get(obj);
-       fence_init(fence, &android_fence_ops, &obj->child_list_lock,
-                  obj->context, ++obj->value);
-       list_add_tail(&fence->child_list, &obj->child_list_head);
-       INIT_LIST_HEAD(&fence->active_list);
-       spin_unlock_irqrestore(&obj->child_list_lock, flags);
-       return fence;
-}
-EXPORT_SYMBOL(sync_pt_create);
-
-static const char *android_fence_get_driver_name(struct fence *fence)
-{
-       struct sync_timeline *parent = fence_parent(fence);
-
-       return parent->ops->driver_name;
-}
-
-static const char *android_fence_get_timeline_name(struct fence *fence)
-{
-       struct sync_timeline *parent = fence_parent(fence);
-
-       return parent->name;
-}
-
-static void android_fence_release(struct fence *fence)
-{
-       struct sync_timeline *parent = fence_parent(fence);
-       unsigned long flags;
-
-       spin_lock_irqsave(fence->lock, flags);
-       list_del(&fence->child_list);
-       if (WARN_ON_ONCE(!list_empty(&fence->active_list)))
-               list_del(&fence->active_list);
-       spin_unlock_irqrestore(fence->lock, flags);
-
-       sync_timeline_put(parent);
-       fence_free(fence);
-}
-
-static bool android_fence_signaled(struct fence *fence)
-{
-       struct sync_timeline *parent = fence_parent(fence);
-       int ret;
-
-       ret = parent->ops->has_signaled(fence);
-       if (ret < 0)
-               fence->status = ret;
-       return ret;
-}
-
-static bool android_fence_enable_signaling(struct fence *fence)
-{
-       struct sync_timeline *parent = fence_parent(fence);
-
-       if (android_fence_signaled(fence))
-               return false;
-
-       list_add_tail(&fence->active_list, &parent->active_list_head);
-       return true;
-}
-
-static void android_fence_value_str(struct fence *fence,
-                                   char *str, int size)
-{
-       struct sync_timeline *parent = fence_parent(fence);
-
-       if (!parent->ops->fence_value_str) {
-               if (size)
-                       *str = 0;
-               return;
-       }
-       parent->ops->fence_value_str(fence, str, size);
-}
-
-static void android_fence_timeline_value_str(struct fence *fence,
-                                            char *str, int size)
-{
-       struct sync_timeline *parent = fence_parent(fence);
-
-       if (!parent->ops->timeline_value_str) {
-               if (size)
-                       *str = 0;
-               return;
-       }
-       parent->ops->timeline_value_str(parent, str, size);
-}
-
-static const struct fence_ops android_fence_ops = {
-       .get_driver_name = android_fence_get_driver_name,
-       .get_timeline_name = android_fence_get_timeline_name,
-       .enable_signaling = android_fence_enable_signaling,
-       .signaled = android_fence_signaled,
-       .wait = fence_default_wait,
-       .release = android_fence_release,
-       .fence_value_str = android_fence_value_str,
-       .timeline_value_str = android_fence_timeline_value_str,
-};
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
deleted file mode 100644 (file)
index b56885c..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * include/linux/sync.h
- *
- * Copyright (C) 2012 Google, Inc.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_SYNC_H
-#define _LINUX_SYNC_H
-
-#include <linux/types.h>
-#include <linux/kref.h>
-#include <linux/ktime.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/fence.h>
-
-#include <linux/sync_file.h>
-#include <uapi/linux/sync_file.h>
-
-struct sync_timeline;
-
-/**
- * struct sync_timeline_ops - sync object implementation ops
- * @driver_name:       name of the implementation
- * @has_signaled:      returns:
- *                       1 if pt has signaled
- *                       0 if pt has not signaled
- *                      <0 on error
- * @timeline_value_str: fill str with the value of the sync_timeline's counter
- * @fence_value_str:   fill str with the value of the fence
- */
-struct sync_timeline_ops {
-       const char *driver_name;
-
-       /* required */
-       int (*has_signaled)(struct fence *fence);
-
-       /* optional */
-       void (*timeline_value_str)(struct sync_timeline *timeline, char *str,
-                                  int size);
-
-       /* optional */
-       void (*fence_value_str)(struct fence *fence, char *str, int size);
-};
-
-/**
- * struct sync_timeline - sync object
- * @kref:              reference count on fence.
- * @ops:               ops that define the implementation of the sync_timeline
- * @name:              name of the sync_timeline. Useful for debugging
- * @destroyed:         set when sync_timeline is destroyed
- * @child_list_head:   list of children sync_pts for this sync_timeline
- * @child_list_lock:   lock protecting @child_list_head, destroyed, and
- *                     fence.status
- * @active_list_head:  list of active (unsignaled/errored) sync_pts
- * @sync_timeline_list:        membership in global sync_timeline_list
- */
-struct sync_timeline {
-       struct kref             kref;
-       const struct sync_timeline_ops  *ops;
-       char                    name[32];
-
-       /* protected by child_list_lock */
-       bool                    destroyed;
-       int                     context, value;
-
-       struct list_head        child_list_head;
-       spinlock_t              child_list_lock;
-
-       struct list_head        active_list_head;
-
-#ifdef CONFIG_DEBUG_FS
-       struct list_head        sync_timeline_list;
-#endif
-};
-
-static inline struct sync_timeline *fence_parent(struct fence *fence)
-{
-       return container_of(fence->lock, struct sync_timeline,
-                           child_list_lock);
-}
-
-/*
- * API for sync_timeline implementers
- */
-
-/**
- * sync_timeline_create() - creates a sync object
- * @ops:       specifies the implementation ops for the object
- * @size:      size to allocate for this obj
- * @name:      sync_timeline name
- *
- * Creates a new sync_timeline which will use the implementation specified by
- * @ops.  @size bytes will be allocated allowing for implementation specific
- * data to be kept after the generic sync_timeline struct. Returns the
- * sync_timeline object or NULL in case of error.
- */
-struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
-                                          int size, const char *name);
-
-/**
- * sync_timeline_destroy() - destroys a sync object
- * @obj:       sync_timeline to destroy
- *
- * A sync implementation should call this when the @obj is going away
- * (i.e. module unload.)  @obj won't actually be freed until all its children
- * fences are freed.
- */
-void sync_timeline_destroy(struct sync_timeline *obj);
-
-/**
- * sync_timeline_signal() - signal a status change on a sync_timeline
- * @obj:       sync_timeline to signal
- *
- * A sync implementation should call this any time one of it's fences
- * has signaled or has an error condition.
- */
-void sync_timeline_signal(struct sync_timeline *obj);
-
-/**
- * sync_pt_create() - creates a sync pt
- * @parent:    fence's parent sync_timeline
- * @size:      size to allocate for this pt
- *
- * Creates a new fence as a child of @parent.  @size bytes will be
- * allocated allowing for implementation specific data to be kept after
- * the generic sync_timeline struct. Returns the fence object or
- * NULL in case of error.
- */
-struct fence *sync_pt_create(struct sync_timeline *parent, int size);
-
-#ifdef CONFIG_DEBUG_FS
-
-void sync_timeline_debug_add(struct sync_timeline *obj);
-void sync_timeline_debug_remove(struct sync_timeline *obj);
-void sync_file_debug_add(struct sync_file *fence);
-void sync_file_debug_remove(struct sync_file *fence);
-void sync_dump(void);
-
-#else
-# define sync_timeline_debug_add(obj)
-# define sync_timeline_debug_remove(obj)
-# define sync_file_debug_add(fence)
-# define sync_file_debug_remove(fence)
-# define sync_dump()
-#endif
-
-#endif /* _LINUX_SYNC_H */
index 5f57499c98bfc9beff5d62d90d7b00765dc1593f..4c5a85595a85b7d6339804d20a80a42b0aa5eabf 100644 (file)
  */
 
 #include <linux/debugfs.h>
-#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/poll.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/anon_inodes.h>
-#include <linux/time64.h>
-#include <linux/sync_file.h>
-#include "sw_sync.h"
-
-#ifdef CONFIG_DEBUG_FS
+#include "sync_debug.h"
 
 static struct dentry *dbgfs;
 
@@ -105,7 +91,7 @@ static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
                seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
        }
 
-       if ((!fence || fence->ops->timeline_value_str) &&
+       if (fence->ops->timeline_value_str &&
                fence->ops->fence_value_str) {
                char value[64];
                bool success;
@@ -113,10 +99,9 @@ static void sync_print_fence(struct seq_file *s, struct fence *fence, bool show)
                fence->ops->fence_value_str(fence, value, sizeof(value));
                success = strlen(value);
 
-               if (success)
+               if (success) {
                        seq_printf(s, ": %s", value);
 
-               if (success && fence) {
                        fence->ops->timeline_value_str(fence, value,
                                                       sizeof(value));
 
@@ -133,22 +118,13 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
        struct list_head *pos;
        unsigned long flags;
 
-       seq_printf(s, "%s %s", obj->name, obj->ops->driver_name);
-
-       if (obj->ops->timeline_value_str) {
-               char value[64];
-
-               obj->ops->timeline_value_str(obj, value, sizeof(value));
-               seq_printf(s, ": %s", value);
-       }
-
-       seq_puts(s, "\n");
+       seq_printf(s, "%s: %d\n", obj->name, obj->value);
 
        spin_lock_irqsave(&obj->child_list_lock, flags);
        list_for_each(pos, &obj->child_list_head) {
-               struct fence *fence =
-                       container_of(pos, struct fence, child_list);
-               sync_print_fence(s, fence, false);
+               struct sync_pt *pt =
+                       container_of(pos, struct sync_pt, child_list);
+               sync_print_fence(s, &pt->base, false);
        }
        spin_unlock_irqrestore(&obj->child_list_lock, flags);
 }
@@ -209,126 +185,19 @@ static const struct file_operations sync_info_debugfs_fops = {
        .release        = single_release,
 };
 
-/*
- * *WARNING*
- *
- * improper use of this can result in deadlocking kernel drivers from userspace.
- */
-
-/* opening sw_sync create a new sync obj */
-static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
-{
-       struct sw_sync_timeline *obj;
-       char task_comm[TASK_COMM_LEN];
-
-       get_task_comm(task_comm, current);
-
-       obj = sw_sync_timeline_create(task_comm);
-       if (!obj)
-               return -ENOMEM;
-
-       file->private_data = obj;
-
-       return 0;
-}
-
-static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
-{
-       struct sw_sync_timeline *obj = file->private_data;
-
-       sync_timeline_destroy(&obj->obj);
-       return 0;
-}
-
-static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
-                                      unsigned long arg)
-{
-       int fd = get_unused_fd_flags(O_CLOEXEC);
-       int err;
-       struct fence *fence;
-       struct sync_file *sync_file;
-       struct sw_sync_create_fence_data data;
-
-       if (fd < 0)
-               return fd;
-
-       if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
-               err = -EFAULT;
-               goto err;
-       }
-
-       fence = sw_sync_pt_create(obj, data.value);
-       if (!fence) {
-               err = -ENOMEM;
-               goto err;
-       }
-
-       sync_file = sync_file_create(fence);
-       if (!sync_file) {
-               fence_put(fence);
-               err = -ENOMEM;
-               goto err;
-       }
-
-       data.fence = fd;
-       if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
-               fput(sync_file->file);
-               err = -EFAULT;
-               goto err;
-       }
-
-       fd_install(fd, sync_file->file);
-
-       return 0;
-
-err:
-       put_unused_fd(fd);
-       return err;
-}
-
-static long sw_sync_ioctl_inc(struct sw_sync_timeline *obj, unsigned long arg)
-{
-       u32 value;
-
-       if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
-               return -EFAULT;
-
-       sw_sync_timeline_inc(obj, value);
-
-       return 0;
-}
-
-static long sw_sync_ioctl(struct file *file, unsigned int cmd,
-                         unsigned long arg)
-{
-       struct sw_sync_timeline *obj = file->private_data;
-
-       switch (cmd) {
-       case SW_SYNC_IOC_CREATE_FENCE:
-               return sw_sync_ioctl_create_fence(obj, arg);
-
-       case SW_SYNC_IOC_INC:
-               return sw_sync_ioctl_inc(obj, arg);
-
-       default:
-               return -ENOTTY;
-       }
-}
-
-static const struct file_operations sw_sync_debugfs_fops = {
-       .open           = sw_sync_debugfs_open,
-       .release        = sw_sync_debugfs_release,
-       .unlocked_ioctl = sw_sync_ioctl,
-       .compat_ioctl = sw_sync_ioctl,
-};
-
 static __init int sync_debugfs_init(void)
 {
        dbgfs = debugfs_create_dir("sync", NULL);
 
-       debugfs_create_file("info", 0444, dbgfs, NULL, &sync_info_debugfs_fops);
-       debugfs_create_file("sw_sync", 0644, dbgfs, NULL,
-                           &sw_sync_debugfs_fops);
+       /*
+        * The debugfs files won't ever get removed and thus, there is
+        * no need to protect it against removal races. The use of
+        * debugfs_create_file_unsafe() is actually safe here.
+        */
+       debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
+                                  &sync_info_debugfs_fops);
+       debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
+                                  &sw_sync_debugfs_fops);
 
        return 0;
 }
@@ -359,5 +228,3 @@ void sync_dump(void)
                }
        }
 }
-
-#endif
diff --git a/drivers/staging/android/sync_debug.h b/drivers/staging/android/sync_debug.h
new file mode 100644 (file)
index 0000000..425ebc5
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * include/linux/sync.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/fence.h>
+
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
+
+/**
+ * struct sync_timeline - sync object
+ * @kref:              reference count on fence.
+ * @name:              name of the sync_timeline. Useful for debugging
+ * @child_list_head:   list of children sync_pts for this sync_timeline
+ * @child_list_lock:   lock protecting @child_list_head and fence.status
+ * @active_list_head:  list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list:        membership in global sync_timeline_list
+ */
+struct sync_timeline {
+       struct kref             kref;
+       char                    name[32];
+
+       /* protected by child_list_lock */
+       int                     context, value;
+
+       struct list_head        child_list_head;
+       spinlock_t              child_list_lock;
+
+       struct list_head        active_list_head;
+
+       struct list_head        sync_timeline_list;
+};
+
+static inline struct sync_timeline *fence_parent(struct fence *fence)
+{
+       return container_of(fence->lock, struct sync_timeline,
+                           child_list_lock);
+}
+
+/**
+ * struct sync_pt - sync_pt object
+ * @base: base fence object
+ * @child_list: sync timeline child's list
+ * @active_list: sync timeline active child's list
+ */
+struct sync_pt {
+       struct fence base;
+       struct list_head child_list;
+       struct list_head active_list;
+};
+
+#ifdef CONFIG_SW_SYNC
+
+extern const struct file_operations sw_sync_debugfs_fops;
+
+void sync_timeline_debug_add(struct sync_timeline *obj);
+void sync_timeline_debug_remove(struct sync_timeline *obj);
+void sync_file_debug_add(struct sync_file *fence);
+void sync_file_debug_remove(struct sync_file *fence);
+void sync_dump(void);
+
+#else
+# define sync_timeline_debug_add(obj)
+# define sync_timeline_debug_remove(obj)
+# define sync_file_debug_add(fence)
+# define sync_file_debug_remove(fence)
+# define sync_dump()
+#endif
+
+#endif /* _LINUX_SYNC_H */
index a0f80f41677ef6ffae1df895e7e48bb680370597..6b5ce9640ddda54bb793d76a313a3b368052fcb1 100644 (file)
@@ -5,7 +5,7 @@
 #if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_SYNC_H
 
-#include "../sync.h"
+#include "../sync_debug.h"
 #include <linux/tracepoint.h>
 
 TRACE_EVENT(sync_timeline,
@@ -15,21 +15,15 @@ TRACE_EVENT(sync_timeline,
 
        TP_STRUCT__entry(
                        __string(name, timeline->name)
-                       __array(char, value, 32)
+                       __field(u32, value)
        ),
 
        TP_fast_assign(
                        __assign_str(name, timeline->name);
-                       if (timeline->ops->timeline_value_str) {
-                               timeline->ops->timeline_value_str(timeline,
-                                                       __entry->value,
-                                                       sizeof(__entry->value));
-                       } else {
-                               __entry->value[0] = '\0';
-                       }
+                       __entry->value = timeline->value;
        ),
 
-       TP_printk("name=%s value=%s", __get_str(name), __entry->value)
+       TP_printk("name=%s value=%d", __get_str(name), __entry->value)
 );
 
 #endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
diff --git a/drivers/staging/android/uapi/sw_sync.h b/drivers/staging/android/uapi/sw_sync.h
deleted file mode 100644 (file)
index 9b5d486..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_SW_SYNC_H
-#define _UAPI_LINUX_SW_SYNC_H
-
-#include <linux/types.h>
-
-struct sw_sync_create_fence_data {
-       __u32   value;
-       char    name[32];
-       __s32   fence; /* fd of new fence */
-};
-
-#define SW_SYNC_IOC_MAGIC      'W'
-
-#define SW_SYNC_IOC_CREATE_FENCE       _IOWR(SW_SYNC_IOC_MAGIC, 0,\
-               struct sw_sync_create_fence_data)
-#define SW_SYNC_IOC_INC                        _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
-
-#endif /* _UAPI_LINUX_SW_SYNC_H */
index ad5297f6d4182739e4ec317f379efaf088180367..08fb26b51a5fb3d98ae191d83c0e6159a3a8c9b0 100644 (file)
@@ -779,7 +779,7 @@ struct comedi_subdinfo {
        unsigned int flags;
        unsigned int range_type;
        unsigned int settling_time_0;
-       unsigned insn_bits_support;
+       unsigned int insn_bits_support;
        unsigned int unused[8];
 };
 
index 629080f39db0e09b46f69bf7ada769e186a43e4f..1999eed4f4c5de13b80b26ea97598815032956d7 100644 (file)
@@ -312,8 +312,8 @@ static void comedi_file_reset(struct file *file)
        }
        cfp->last_attached = dev->attached;
        cfp->last_detach_count = dev->detach_count;
-       ACCESS_ONCE(cfp->read_subdev) = read_s;
-       ACCESS_ONCE(cfp->write_subdev) = write_s;
+       WRITE_ONCE(cfp->read_subdev, read_s);
+       WRITE_ONCE(cfp->write_subdev, write_s);
 }
 
 static void comedi_file_check(struct file *file)
@@ -331,7 +331,7 @@ static struct comedi_subdevice *comedi_file_read_subdevice(struct file *file)
        struct comedi_file *cfp = file->private_data;
 
        comedi_file_check(file);
-       return ACCESS_ONCE(cfp->read_subdev);
+       return READ_ONCE(cfp->read_subdev);
 }
 
 static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
@@ -339,7 +339,7 @@ static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
        struct comedi_file *cfp = file->private_data;
 
        comedi_file_check(file);
-       return ACCESS_ONCE(cfp->write_subdev);
+       return READ_ONCE(cfp->write_subdev);
 }
 
 static int resize_async_buffer(struct comedi_device *dev,
@@ -1256,16 +1256,17 @@ static int parse_insn(struct comedi_device *dev, struct comedi_insn *insn,
                switch (insn->insn) {
                case INSN_GTOD:
                        {
-                               struct timeval tv;
+                               struct timespec64 tv;
 
                                if (insn->n != 2) {
                                        ret = -EINVAL;
                                        break;
                                }
 
-                               do_gettimeofday(&tv);
-                               data[0] = tv.tv_sec;
-                               data[1] = tv.tv_usec;
+                               ktime_get_real_ts64(&tv);
+                               /* unsigned data safe until 2106 */
+                               data[0] = (unsigned int)tv.tv_sec;
+                               data[1] = tv.tv_nsec / NSEC_PER_USEC;
                                ret = 2;
 
                                break;
@@ -1992,7 +1993,7 @@ static int do_setrsubd_ioctl(struct comedi_device *dev, unsigned long arg,
            !(s_old->async->cmd.flags & CMDF_WRITE))
                return -EBUSY;
 
-       ACCESS_ONCE(cfp->read_subdev) = s_new;
+       WRITE_ONCE(cfp->read_subdev, s_new);
        return 0;
 }
 
@@ -2034,7 +2035,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
            (s_old->async->cmd.flags & CMDF_WRITE))
                return -EBUSY;
 
-       ACCESS_ONCE(cfp->write_subdev) = s_new;
+       WRITE_ONCE(cfp->write_subdev, s_new);
        return 0;
 }
 
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci1564.c
deleted file mode 100644 (file)
index f0c0d58..0000000
+++ /dev/null
@@ -1,187 +0,0 @@
-static int apci1564_timer_insn_config(struct comedi_device *dev,
-                                     struct comedi_subdevice *s,
-                                     struct comedi_insn *insn,
-                                     unsigned int *data)
-{
-       struct apci1564_private *devpriv = dev->private;
-       unsigned int ctrl;
-
-       devpriv->tsk_current = current;
-
-       /* Stop the timer */
-       ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
-       ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
-                 ADDI_TCW_CTRL_ENA);
-       outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
-
-       if (data[1] == 1) {
-               /* Enable timer int & disable all the other int sources */
-               outl(ADDI_TCW_CTRL_IRQ_ENA,
-                    devpriv->timer + ADDI_TCW_CTRL_REG);
-               outl(0x0, dev->iobase + APCI1564_DI_IRQ_REG);
-               outl(0x0, dev->iobase + APCI1564_DO_IRQ_REG);
-               outl(0x0, dev->iobase + APCI1564_WDOG_IRQ_REG);
-               if (devpriv->counters) {
-                       unsigned long iobase;
-
-                       iobase = devpriv->counters + ADDI_TCW_IRQ_REG;
-                       outl(0x0, iobase + APCI1564_COUNTER(0));
-                       outl(0x0, iobase + APCI1564_COUNTER(1));
-                       outl(0x0, iobase + APCI1564_COUNTER(2));
-               }
-       } else {
-               /* disable Timer interrupt */
-               outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
-       }
-
-       /* Loading Timebase */
-       outl(data[2], devpriv->timer + ADDI_TCW_TIMEBASE_REG);
-
-       /* Loading the Reload value */
-       outl(data[3], devpriv->timer + ADDI_TCW_RELOAD_REG);
-
-       ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
-       ctrl &= ~(ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE_MASK |
-                 ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
-                 ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
-                 ADDI_TCW_CTRL_WARN_ENA | ADDI_TCW_CTRL_ENA);
-       ctrl |= ADDI_TCW_CTRL_MODE(2) | ADDI_TCW_CTRL_TIMER_ENA;
-       outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
-
-       return insn->n;
-}
-
-static int apci1564_timer_insn_write(struct comedi_device *dev,
-                                    struct comedi_subdevice *s,
-                                    struct comedi_insn *insn,
-                                    unsigned int *data)
-{
-       struct apci1564_private *devpriv = dev->private;
-       unsigned int ctrl;
-
-       ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
-       ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
-       switch (data[1]) {
-       case 0: /* Stop The Timer */
-               ctrl &= ~ADDI_TCW_CTRL_ENA;
-               break;
-       case 1: /* Enable the Timer */
-               ctrl |= ADDI_TCW_CTRL_ENA;
-               break;
-       }
-       outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
-
-       return insn->n;
-}
-
-static int apci1564_timer_insn_read(struct comedi_device *dev,
-                                   struct comedi_subdevice *s,
-                                   struct comedi_insn *insn,
-                                   unsigned int *data)
-{
-       struct apci1564_private *devpriv = dev->private;
-
-       /* Stores the status of the Timer */
-       data[0] = inl(devpriv->timer + ADDI_TCW_STATUS_REG) &
-                 ADDI_TCW_STATUS_OVERFLOW;
-
-       /* Stores the Actual value of the Timer */
-       data[1] = inl(devpriv->timer + ADDI_TCW_VAL_REG);
-
-       return insn->n;
-}
-
-static int apci1564_counter_insn_config(struct comedi_device *dev,
-                                       struct comedi_subdevice *s,
-                                       struct comedi_insn *insn,
-                                       unsigned int *data)
-{
-       struct apci1564_private *devpriv = dev->private;
-       unsigned int chan = CR_CHAN(insn->chanspec);
-       unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
-       unsigned int ctrl;
-
-       devpriv->tsk_current = current;
-
-       /* Stop The Timer */
-       ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
-       ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG |
-                 ADDI_TCW_CTRL_ENA);
-       outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
-       /* Set the reload value */
-       outl(data[3], iobase + ADDI_TCW_RELOAD_REG);
-
-       /* Set the mode */
-       ctrl &= ~(ADDI_TCW_CTRL_EXT_CLK_MASK | ADDI_TCW_CTRL_MODE_MASK |
-                 ADDI_TCW_CTRL_TIMER_ENA | ADDI_TCW_CTRL_RESET_ENA |
-                 ADDI_TCW_CTRL_WARN_ENA);
-       ctrl |= ADDI_TCW_CTRL_CNTR_ENA | ADDI_TCW_CTRL_MODE(data[4]);
-       outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
-       /* Enable or Disable Interrupt */
-       if (data[1])
-               ctrl |= ADDI_TCW_CTRL_IRQ_ENA;
-       else
-               ctrl &= ~ADDI_TCW_CTRL_IRQ_ENA;
-       outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
-       /* Set the Up/Down selection */
-       if (data[6])
-               ctrl |= ADDI_TCW_CTRL_CNT_UP;
-       else
-               ctrl &= ~ADDI_TCW_CTRL_CNT_UP;
-       outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
-       return insn->n;
-}
-
-static int apci1564_counter_insn_write(struct comedi_device *dev,
-                                      struct comedi_subdevice *s,
-                                      struct comedi_insn *insn,
-                                      unsigned int *data)
-{
-       struct apci1564_private *devpriv = dev->private;
-       unsigned int chan = CR_CHAN(insn->chanspec);
-       unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
-       unsigned int ctrl;
-
-       ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
-       ctrl &= ~(ADDI_TCW_CTRL_GATE | ADDI_TCW_CTRL_TRIG);
-       switch (data[1]) {
-       case 0: /* Stops the Counter subdevice */
-               ctrl = 0;
-               break;
-       case 1: /* Start the Counter subdevice */
-               ctrl |= ADDI_TCW_CTRL_ENA;
-               break;
-       case 2: /* Clears the Counter subdevice */
-               ctrl |= ADDI_TCW_CTRL_GATE;
-               break;
-       }
-       outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
-
-       return insn->n;
-}
-
-static int apci1564_counter_insn_read(struct comedi_device *dev,
-                                     struct comedi_subdevice *s,
-                                     struct comedi_insn *insn,
-                                     unsigned int *data)
-{
-       struct apci1564_private *devpriv = dev->private;
-       unsigned int chan = CR_CHAN(insn->chanspec);
-       unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
-       unsigned int status;
-
-       /* Read the Counter Actual Value. */
-       data[0] = inl(iobase + ADDI_TCW_VAL_REG);
-
-       status = inl(iobase + ADDI_TCW_STATUS_REG);
-       data[1] = (status & ADDI_TCW_STATUS_SOFT_TRIG) ? 1 : 0;
-       data[2] = (status & ADDI_TCW_STATUS_HARDWARE_TRIG) ? 1 : 0;
-       data[3] = (status & ADDI_TCW_STATUS_SOFT_CLR) ? 1 : 0;
-       data[4] = (status & ADDI_TCW_STATUS_OVERFLOW) ? 1 : 0;
-
-       return insn->n;
-}
index f1ccfbd4c578ef89fec698df5365f2ad10ffd83e..9bfb79c2e5c8795e366d94c6650b634104e780ca 100644 (file)
  * details.
  */
 
+/*
+ * Driver: addi_apci_1564
+ * Description: ADDI-DATA APCI-1564 Digital I/O board
+ * Devices: [ADDI-DATA] APCI-1564 (addi_apci_1564)
+ * Author: H Hartley Sweeten <hsweeten@visionengravers.com>
+ * Updated: Thu, 02 Jun 2016 13:12:46 -0700
+ * Status: untested
+ *
+ * Configuration Options: not applicable, uses comedi PCI auto config
+ *
+ * This board has the following features:
+ *   - 32 optically isolated digital inputs (24V), 16 of which can
+ *     generate change-of-state (COS) interrupts (channels 4 to 19)
+ *   - 32 optically isolated digital outputs (10V to 36V)
+ *   - 1 8-bit watchdog for resetting the outputs
+ *   - 1 12-bit timer
+ *   - 3 32-bit counters
+ *   - 2 diagnostic inputs
+ *
+ * The COS, timer, and counter subdevices all use the dev->read_subdev to
+ * return the interrupt status. The sample data is updated and returned when
+ * any of these subdevices generate an interrupt. The sample data format is:
+ *
+ *    Bit   Description
+ *   -----  ------------------------------------------
+ *    31    COS interrupt
+ *    30    timer interrupt
+ *    29    counter 2 interrupt
+ *    28    counter 1 interrupt
+ *    27    counter 0 interrupt
+ *   26:20  not used
+ *   19:4   COS digital input state (channels 19 to 4)
+ *    3:0   not used
+ *
+ * The COS interrupts must be configured using an INSN_CONFIG_DIGITAL_TRIG
+ * instruction before they can be enabled by an async command. The COS
+ * interrupts will stay active until canceled.
+ *
+ * The timer subdevice does not use an async command. All control is handled
+ * by the (*insn_config).
+ *
+ * FIXME: The format of the ADDI_TCW_TIMEBASE_REG is not described in the
+ * datasheet I have. The INSN_CONFIG_SET_CLOCK_SRC currently just writes
+ * the raw data[1] to this register along with the raw data[2] value to the
+ * ADDI_TCW_RELOAD_REG. If anyone tests this and can determine the actual
+ * timebase/reload operation please let me know.
+ *
+ * The counter subdevice also does not use an async command. All control is
+ * handled by the (*insn_config).
+ *
+ * FIXME: The operation of the counters is not really described in the
+ * datasheet I have. The (*insn_config) needs more work.
+ */
+
 #include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/sched.h>
 
 #include "../comedi_pci.h"
 #include "addi_tcw.h"
 #define APCI1564_DI_REG                                0x00
 #define APCI1564_DI_INT_MODE1_REG              0x04
 #define APCI1564_DI_INT_MODE2_REG              0x08
+#define APCI1564_DI_INT_MODE_MASK              0x000ffff0 /* chans [19:4] */
 #define APCI1564_DI_INT_STATUS_REG             0x0c
 #define APCI1564_DI_IRQ_REG                    0x10
 #define APCI1564_DI_IRQ_ENA                    BIT(2)
 #define APCI1564_DO_INT_STATUS_VCC             BIT(0)
 #define APCI1564_DO_IRQ_REG                    0x20
 #define APCI1564_DO_IRQ_INTR                   BIT(0)
-#define APCI1564_WDOG_REG                      0x24
-#define APCI1564_WDOG_RELOAD_REG               0x28
-#define APCI1564_WDOG_TIMEBASE_REG             0x2c
-#define APCI1564_WDOG_CTRL_REG                 0x30
-#define APCI1564_WDOG_STATUS_REG               0x34
-#define APCI1564_WDOG_IRQ_REG                  0x38
-#define APCI1564_WDOG_WARN_TIMEVAL_REG         0x3c
-#define APCI1564_WDOG_WARN_TIMEBASE_REG                0x40
+#define APCI1564_WDOG_IOBASE                   0x24
 
 /*
  * devpriv->timer Register Map (see addi_tcw.h for register/bit defines)
  */
 #define APCI1564_COUNTER(x)                    ((x) * 0x20)
 
+/*
+ * The dev->read_subdev is used to return the interrupt events along with
+ * the state of the interrupt capable inputs.
+ */
+#define APCI1564_EVENT_COS                     BIT(31)
+#define APCI1564_EVENT_TIMER                   BIT(30)
+#define APCI1564_EVENT_COUNTER(x)              BIT(27 + (x)) /* counter 0-2 */
+#define APCI1564_EVENT_MASK                    0xfff0000f /* all but [19:4] */
+
 struct apci1564_private {
        unsigned long eeprom;   /* base address of EEPROM register */
        unsigned long timer;    /* base address of 12-bit timer */
        unsigned long counters; /* base address of 32-bit counters */
-       unsigned int mode1;     /* riding-edge/high level channels */
+       unsigned int mode1;     /* rising-edge/high level channels */
        unsigned int mode2;     /* falling-edge/low level channels */
        unsigned int ctrl;      /* interrupt mode OR (edge) . AND (level) */
-       struct task_struct *tsk_current;
 };
 
-#include "addi-data/hwdrv_apci1564.c"
-
 static int apci1564_reset(struct comedi_device *dev)
 {
        struct apci1564_private *devpriv = dev->private;
@@ -138,7 +191,7 @@ static int apci1564_reset(struct comedi_device *dev)
        outl(0x0, dev->iobase + APCI1564_DO_INT_CTRL_REG);
 
        /* Reset the watchdog registers */
-       addi_watchdog_reset(dev->iobase + APCI1564_WDOG_REG);
+       addi_watchdog_reset(dev->iobase + APCI1564_WDOG_IOBASE);
 
        /* Reset the timer registers */
        outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
@@ -165,55 +218,54 @@ static irqreturn_t apci1564_interrupt(int irq, void *d)
        unsigned int ctrl;
        unsigned int chan;
 
+       s->state &= ~APCI1564_EVENT_MASK;
+
        status = inl(dev->iobase + APCI1564_DI_IRQ_REG);
        if (status & APCI1564_DI_IRQ_ENA) {
-               /* disable the interrupt */
+               /* get the COS interrupt state and set the event flag */
+               s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG);
+               s->state &= APCI1564_DI_INT_MODE_MASK;
+               s->state |= APCI1564_EVENT_COS;
+
+               /* clear the interrupt */
                outl(status & ~APCI1564_DI_IRQ_ENA,
                     dev->iobase + APCI1564_DI_IRQ_REG);
-
-               s->state = inl(dev->iobase + APCI1564_DI_INT_STATUS_REG) &
-                          0xffff;
-               comedi_buf_write_samples(s, &s->state, 1);
-               comedi_handle_events(dev, s);
-
-               /* enable the interrupt */
                outl(status, dev->iobase + APCI1564_DI_IRQ_REG);
        }
 
        status = inl(devpriv->timer + ADDI_TCW_IRQ_REG);
-       if (status & 0x01) {
-               /*  Disable Timer Interrupt */
+       if (status & ADDI_TCW_IRQ) {
+               s->state |= APCI1564_EVENT_TIMER;
+
+               /* clear the interrupt */
                ctrl = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
                outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
-
-               /* Send a signal to from kernel to user space */
-               send_sig(SIGIO, devpriv->tsk_current, 0);
-
-               /*  Enable Timer Interrupt */
                outl(ctrl, devpriv->timer + ADDI_TCW_CTRL_REG);
        }
 
        if (devpriv->counters) {
-               for (chan = 0; chan < 4; chan++) {
+               for (chan = 0; chan < 3; chan++) {
                        unsigned long iobase;
 
                        iobase = devpriv->counters + APCI1564_COUNTER(chan);
 
                        status = inl(iobase + ADDI_TCW_IRQ_REG);
-                       if (status & 0x01) {
-                               /*  Disable Counter Interrupt */
+                       if (status & ADDI_TCW_IRQ) {
+                               s->state |= APCI1564_EVENT_COUNTER(chan);
+
+                               /* clear the interrupt */
                                ctrl = inl(iobase + ADDI_TCW_CTRL_REG);
                                outl(0x0, iobase + ADDI_TCW_CTRL_REG);
-
-                               /* Send a signal to from kernel to user space */
-                               send_sig(SIGIO, devpriv->tsk_current, 0);
-
-                               /*  Enable Counter Interrupt */
                                outl(ctrl, iobase + ADDI_TCW_CTRL_REG);
                        }
                }
        }
 
+       if (s->state & APCI1564_EVENT_MASK) {
+               comedi_buf_write_samples(s, &s->state, 1);
+               comedi_handle_events(dev, s);
+       }
+
        return IRQ_HANDLED;
 }
 
@@ -255,7 +307,7 @@ static int apci1564_diag_insn_bits(struct comedi_device *dev,
 /*
  * Change-Of-State (COS) interrupt configuration
  *
- * Channels 0 to 15 are interruptible. These channels can be configured
+ * Channels 4 to 19 are interruptible. These channels can be configured
  * to generate interrupts based on AND/OR logic for the desired channels.
  *
  *     OR logic
@@ -343,6 +395,10 @@ static int apci1564_cos_insn_config(struct comedi_device *dev,
                default:
                        return -EINVAL;
                }
+
+               /* ensure the mode bits are in-range for channels [19:4] */
+               devpriv->mode1 &= APCI1564_DI_INT_MODE_MASK;
+               devpriv->mode2 &= APCI1564_DI_INT_MODE_MASK;
                break;
        default:
                return -EINVAL;
@@ -409,7 +465,7 @@ static int apci1564_cos_cmd(struct comedi_device *dev,
 {
        struct apci1564_private *devpriv = dev->private;
 
-       if (!devpriv->ctrl) {
+       if (!devpriv->ctrl && !(devpriv->mode1 || devpriv->mode2)) {
                dev_warn(dev->class_dev,
                         "Interrupts disabled due to mode configuration!\n");
                return -EINVAL;
@@ -433,6 +489,173 @@ static int apci1564_cos_cancel(struct comedi_device *dev,
        return 0;
 }
 
+static int apci1564_timer_insn_config(struct comedi_device *dev,
+                                     struct comedi_subdevice *s,
+                                     struct comedi_insn *insn,
+                                     unsigned int *data)
+{
+       struct apci1564_private *devpriv = dev->private;
+       unsigned int val;
+
+       switch (data[0]) {
+       case INSN_CONFIG_ARM:
+               if (data[1] > s->maxdata)
+                       return -EINVAL;
+               outl(data[1], devpriv->timer + ADDI_TCW_RELOAD_REG);
+               outl(ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_TIMER_ENA,
+                    devpriv->timer + ADDI_TCW_CTRL_REG);
+               break;
+       case INSN_CONFIG_DISARM:
+               outl(0x0, devpriv->timer + ADDI_TCW_CTRL_REG);
+               break;
+       case INSN_CONFIG_GET_COUNTER_STATUS:
+               data[1] = 0;
+               val = inl(devpriv->timer + ADDI_TCW_CTRL_REG);
+               if (val & ADDI_TCW_CTRL_IRQ_ENA)
+                       data[1] |= COMEDI_COUNTER_ARMED;
+               if (val & ADDI_TCW_CTRL_TIMER_ENA)
+                       data[1] |= COMEDI_COUNTER_COUNTING;
+               val = inl(devpriv->timer + ADDI_TCW_STATUS_REG);
+               if (val & ADDI_TCW_STATUS_OVERFLOW)
+                       data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
+               data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
+                         COMEDI_COUNTER_TERMINAL_COUNT;
+               break;
+       case INSN_CONFIG_SET_CLOCK_SRC:
+               if (data[2] > s->maxdata)
+                       return -EINVAL;
+               outl(data[1], devpriv->timer + ADDI_TCW_TIMEBASE_REG);
+               outl(data[2], devpriv->timer + ADDI_TCW_RELOAD_REG);
+               break;
+       case INSN_CONFIG_GET_CLOCK_SRC:
+               data[1] = inl(devpriv->timer + ADDI_TCW_TIMEBASE_REG);
+               data[2] = inl(devpriv->timer + ADDI_TCW_RELOAD_REG);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return insn->n;
+}
+
+static int apci1564_timer_insn_write(struct comedi_device *dev,
+                                    struct comedi_subdevice *s,
+                                    struct comedi_insn *insn,
+                                    unsigned int *data)
+{
+       struct apci1564_private *devpriv = dev->private;
+
+       /* just write the last value to the reload register */
+       if (insn->n) {
+               unsigned int val = data[insn->n - 1];
+
+               outl(val, devpriv->timer + ADDI_TCW_RELOAD_REG);
+       }
+
+       return insn->n;
+}
+
+static int apci1564_timer_insn_read(struct comedi_device *dev,
+                                   struct comedi_subdevice *s,
+                                   struct comedi_insn *insn,
+                                   unsigned int *data)
+{
+       struct apci1564_private *devpriv = dev->private;
+       int i;
+
+       /* return the actual value of the timer */
+       for (i = 0; i < insn->n; i++)
+               data[i] = inl(devpriv->timer + ADDI_TCW_VAL_REG);
+
+       return insn->n;
+}
+
+static int apci1564_counter_insn_config(struct comedi_device *dev,
+                                       struct comedi_subdevice *s,
+                                       struct comedi_insn *insn,
+                                       unsigned int *data)
+{
+       struct apci1564_private *devpriv = dev->private;
+       unsigned int chan = CR_CHAN(insn->chanspec);
+       unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+       unsigned int val;
+
+       switch (data[0]) {
+       case INSN_CONFIG_ARM:
+               val = inl(iobase + ADDI_TCW_CTRL_REG);
+               val |= ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_CNTR_ENA;
+               outl(data[1], iobase + ADDI_TCW_RELOAD_REG);
+               outl(val, iobase + ADDI_TCW_CTRL_REG);
+               break;
+       case INSN_CONFIG_DISARM:
+               val = inl(iobase + ADDI_TCW_CTRL_REG);
+               val &= ~(ADDI_TCW_CTRL_IRQ_ENA | ADDI_TCW_CTRL_CNTR_ENA);
+               outl(val, iobase + ADDI_TCW_CTRL_REG);
+               break;
+       case INSN_CONFIG_SET_COUNTER_MODE:
+               /*
+                * FIXME: The counter operation is not described in the
+                * datasheet. For now just write the raw data[1] value to
+                * the control register.
+                */
+               outl(data[1], iobase + ADDI_TCW_CTRL_REG);
+               break;
+       case INSN_CONFIG_GET_COUNTER_STATUS:
+               data[1] = 0;
+               val = inl(iobase + ADDI_TCW_CTRL_REG);
+               if (val & ADDI_TCW_CTRL_IRQ_ENA)
+                       data[1] |= COMEDI_COUNTER_ARMED;
+               if (val & ADDI_TCW_CTRL_CNTR_ENA)
+                       data[1] |= COMEDI_COUNTER_COUNTING;
+               val = inl(iobase + ADDI_TCW_STATUS_REG);
+               if (val & ADDI_TCW_STATUS_OVERFLOW)
+                       data[1] |= COMEDI_COUNTER_TERMINAL_COUNT;
+               data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING |
+                         COMEDI_COUNTER_TERMINAL_COUNT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return insn->n;
+}
+
+static int apci1564_counter_insn_write(struct comedi_device *dev,
+                                      struct comedi_subdevice *s,
+                                      struct comedi_insn *insn,
+                                      unsigned int *data)
+{
+       struct apci1564_private *devpriv = dev->private;
+       unsigned int chan = CR_CHAN(insn->chanspec);
+       unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+
+       /* just write the last value to the reload register */
+       if (insn->n) {
+               unsigned int val = data[insn->n - 1];
+
+               outl(val, iobase + ADDI_TCW_RELOAD_REG);
+       }
+
+       return insn->n;
+}
+
+static int apci1564_counter_insn_read(struct comedi_device *dev,
+                                     struct comedi_subdevice *s,
+                                     struct comedi_insn *insn,
+                                     unsigned int *data)
+{
+       struct apci1564_private *devpriv = dev->private;
+       unsigned int chan = CR_CHAN(insn->chanspec);
+       unsigned long iobase = devpriv->counters + APCI1564_COUNTER(chan);
+       int i;
+
+       /* return the actual value of the counter */
+       for (i = 0; i < insn->n; i++)
+               data[i] = inl(iobase + ADDI_TCW_VAL_REG);
+
+       return insn->n;
+}
+
 static int apci1564_auto_attach(struct comedi_device *dev,
                                unsigned long context_unused)
 {
@@ -501,7 +724,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
        if (dev->irq) {
                dev->read_subdev = s;
                s->type         = COMEDI_SUBD_DI;
-               s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
+               s->subdev_flags = SDF_READABLE | SDF_CMD_READ | SDF_LSAMPL;
                s->n_chan       = 1;
                s->maxdata      = 1;
                s->range_table  = &range_digital;
@@ -543,7 +766,7 @@ static int apci1564_auto_attach(struct comedi_device *dev,
 
        /* Initialize the watchdog subdevice */
        s = &dev->subdevices[5];
-       ret = addi_watchdog_init(s, dev->iobase + APCI1564_WDOG_REG);
+       ret = addi_watchdog_init(s, dev->iobase + APCI1564_WDOG_IOBASE);
        if (ret)
                return ret;
 
index 4437ea3abe8d04ac4946307b0f42b0c4c7360253..be70bd333807afe697366f9d5e73a6bd1167ded2 100644 (file)
@@ -570,7 +570,7 @@ static int pci9118_ai_cancel(struct comedi_device *dev,
        /* set default config (disable burst and triggers) */
        devpriv->ai_cfg = PCI9118_AI_CFG_PDTRG | PCI9118_AI_CFG_PETRG;
        outl(devpriv->ai_cfg, dev->iobase + PCI9118_AI_CFG_REG);
-       /* reset acqusition control */
+       /* reset acquisition control */
        devpriv->ai_ctrl = 0;
        outl(devpriv->ai_ctrl, dev->iobase + PCI9118_AI_CTRL_REG);
        outl(0, dev->iobase + PCI9118_AI_BURST_NUM_REG);
@@ -1022,12 +1022,12 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
 
        /*
         * Configure analog input and load the chanlist.
-        * The acqusition control bits are enabled later.
+        * The acquisition control bits are enabled later.
         */
        pci9118_set_chanlist(dev, s, cmd->chanlist_len, cmd->chanlist,
                             devpriv->ai_add_front, devpriv->ai_add_back);
 
-       /* Determine acqusition mode and calculate timing */
+       /* Determine acquisition mode and calculate timing */
        devpriv->ai_do = 0;
        if (cmd->scan_begin_src != TRIG_TIMER &&
            cmd->convert_src == TRIG_TIMER) {
@@ -1097,7 +1097,7 @@ static int pci9118_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
 
        if (devpriv->ai_do == 0) {
                dev_err(dev->class_dev,
-                       "Unable to determine acqusition mode! BUG in (*do_cmdtest)?\n");
+                       "Unable to determine acquisition mode! BUG in (*do_cmdtest)?\n");
                return -EINVAL;
        }
 
index c773b8ca65993393ff1dc6269ffc8642c74d2794..1f9c08a845b61ebb027d3434a5fb3c89cebd9b1f 100644 (file)
@@ -1238,7 +1238,7 @@ static void disable_plx_interrupts(struct comedi_device *dev)
 
        devpriv->plx_intcsr_bits = 0;
        writel(devpriv->plx_intcsr_bits,
-              devpriv->plx9080_iobase + PLX_INTRCS_REG);
+              devpriv->plx9080_iobase + PLX_REG_INTCSR);
 }
 
 static void disable_ai_interrupts(struct comedi_device *dev)
@@ -1291,14 +1291,14 @@ static void init_plx9080(struct comedi_device *dev)
        void __iomem *plx_iobase = devpriv->plx9080_iobase;
 
        devpriv->plx_control_bits =
-               readl(devpriv->plx9080_iobase + PLX_CONTROL_REG);
+               readl(devpriv->plx9080_iobase + PLX_REG_CNTRL);
 
 #ifdef __BIG_ENDIAN
-       bits = BIGEND_DMA0 | BIGEND_DMA1;
+       bits = PLX_BIGEND_DMA0 | PLX_BIGEND_DMA1;
 #else
        bits = 0;
 #endif
-       writel(bits, devpriv->plx9080_iobase + PLX_BIGEND_REG);
+       writel(bits, devpriv->plx9080_iobase + PLX_REG_BIGEND);
 
        disable_plx_interrupts(dev);
 
@@ -1308,38 +1308,39 @@ static void init_plx9080(struct comedi_device *dev)
        /*  configure dma0 mode */
        bits = 0;
        /*  enable ready input, not sure if this is necessary */
-       bits |= PLX_DMA_EN_READYIN_BIT;
+       bits |= PLX_DMAMODE_READYIEN;
        /*  enable bterm, not sure if this is necessary */
-       bits |= PLX_EN_BTERM_BIT;
+       bits |= PLX_DMAMODE_BTERMIEN;
        /*  enable dma chaining */
-       bits |= PLX_EN_CHAIN_BIT;
+       bits |= PLX_DMAMODE_CHAINEN;
        /*  enable interrupt on dma done
         *  (probably don't need this, since chain never finishes) */
-       bits |= PLX_EN_DMA_DONE_INTR_BIT;
+       bits |= PLX_DMAMODE_DONEIEN;
        /*  don't increment local address during transfers
         *  (we are transferring from a fixed fifo register) */
-       bits |= PLX_LOCAL_ADDR_CONST_BIT;
+       bits |= PLX_DMAMODE_LACONST;
        /*  route dma interrupt to pci bus */
-       bits |= PLX_DMA_INTR_PCI_BIT;
+       bits |= PLX_DMAMODE_INTRPCI;
        /*  enable demand mode */
-       bits |= PLX_DEMAND_MODE_BIT;
+       bits |= PLX_DMAMODE_DEMAND;
        /*  enable local burst mode */
-       bits |= PLX_DMA_LOCAL_BURST_EN_BIT;
+       bits |= PLX_DMAMODE_BURSTEN;
        /*  4020 uses 32 bit dma */
        if (board->layout == LAYOUT_4020)
-               bits |= PLX_LOCAL_BUS_32_WIDE_BITS;
+               bits |= PLX_DMAMODE_WIDTH32;
        else            /*  localspace0 bus is 16 bits wide */
-               bits |= PLX_LOCAL_BUS_16_WIDE_BITS;
-       writel(bits, plx_iobase + PLX_DMA1_MODE_REG);
+               bits |= PLX_DMAMODE_WIDTH16;
+       writel(bits, plx_iobase + PLX_REG_DMAMODE1);
        if (ao_cmd_is_supported(board))
-               writel(bits, plx_iobase + PLX_DMA0_MODE_REG);
+               writel(bits, plx_iobase + PLX_REG_DMAMODE0);
 
        /*  enable interrupts on plx 9080 */
        devpriv->plx_intcsr_bits |=
-           ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
-           ICS_DMA0_E | ICS_DMA1_E;
+           PLX_INTCSR_LSEABORTEN | PLX_INTCSR_LSEPARITYEN | PLX_INTCSR_PIEN |
+           PLX_INTCSR_PLIEN | PLX_INTCSR_PABORTIEN | PLX_INTCSR_LIOEN |
+           PLX_INTCSR_DMA0IEN | PLX_INTCSR_DMA1IEN;
        writel(devpriv->plx_intcsr_bits,
-              devpriv->plx9080_iobase + PLX_INTRCS_REG);
+              devpriv->plx9080_iobase + PLX_REG_INTCSR);
 }
 
 static void disable_ai_pacing(struct comedi_device *dev)
@@ -1533,8 +1534,8 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
                        cpu_to_le32((devpriv->ai_dma_desc_bus_addr +
                                     ((i + 1) % ai_dma_ring_count(board)) *
                                     sizeof(devpriv->ai_dma_desc[0])) |
-                                   PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
-                                   PLX_XFER_LOCAL_TO_PCI);
+                                   PLX_DMADPR_DESCPCI | PLX_DMADPR_TCINTR |
+                                   PLX_DMADPR_XFERL2P);
        }
        if (ao_cmd_is_supported(board)) {
                for (i = 0; i < AO_DMA_RING_COUNT; i++) {
@@ -1548,8 +1549,8 @@ static int alloc_and_init_dma_members(struct comedi_device *dev)
                                cpu_to_le32((devpriv->ao_dma_desc_bus_addr +
                                             ((i + 1) % (AO_DMA_RING_COUNT)) *
                                             sizeof(devpriv->ao_dma_desc[0])) |
-                                           PLX_DESC_IN_PCI_BIT |
-                                           PLX_INTR_TERM_COUNT);
+                                           PLX_DMADPR_DESCPCI |
+                                           PLX_DMADPR_TCINTR);
                }
        }
        return 0;
@@ -1613,9 +1614,9 @@ static const int i2c_low_udelay = 10;
 static void i2c_set_sda(struct comedi_device *dev, int state)
 {
        struct pcidas64_private *devpriv = dev->private;
-       static const int data_bit = CTL_EE_W;
+       static const int data_bit = PLX_CNTRL_EEWB;
        void __iomem *plx_control_addr = devpriv->plx9080_iobase +
-                                        PLX_CONTROL_REG;
+                                        PLX_REG_CNTRL;
 
        if (state) {
                /*  set data line high */
@@ -1634,9 +1635,9 @@ static void i2c_set_sda(struct comedi_device *dev, int state)
 static void i2c_set_scl(struct comedi_device *dev, int state)
 {
        struct pcidas64_private *devpriv = dev->private;
-       static const int clock_bit = CTL_USERO;
+       static const int clock_bit = PLX_CNTRL_USERO;
        void __iomem *plx_control_addr = devpriv->plx9080_iobase +
-                                        PLX_CONTROL_REG;
+                                        PLX_REG_CNTRL;
 
        if (state) {
                /*  set clock line high */
@@ -1707,7 +1708,7 @@ static void i2c_write(struct comedi_device *dev, unsigned int address,
         */
 
        /*  make sure we dont send anything to eeprom */
-       devpriv->plx_control_bits &= ~CTL_EE_CS;
+       devpriv->plx_control_bits &= ~PLX_CNTRL_EECS;
 
        i2c_stop(dev);
        i2c_start(dev);
@@ -2367,14 +2368,8 @@ static inline void dma_start_sync(struct comedi_device *dev,
 
        /*  spinlock for plx dma control/status reg */
        spin_lock_irqsave(&dev->spinlock, flags);
-       if (channel)
-               writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
-                      PLX_CLEAR_DMA_INTR_BIT,
-                      devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
-       else
-               writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT |
-                      PLX_CLEAR_DMA_INTR_BIT,
-                      devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
+       writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_START | PLX_DMACSR_CLEARINTR,
+              devpriv->plx9080_iobase + PLX_REG_DMACSR(channel));
        spin_unlock_irqrestore(&dev->spinlock, flags);
 }
 
@@ -2552,21 +2547,17 @@ static inline void load_first_dma_descriptor(struct comedi_device *dev,
         * block.  Initializing them to zero seems to fix the problem.
         */
        if (dma_channel) {
-               writel(0,
-                      devpriv->plx9080_iobase + PLX_DMA1_TRANSFER_SIZE_REG);
-               writel(0, devpriv->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG);
-               writel(0,
-                      devpriv->plx9080_iobase + PLX_DMA1_LOCAL_ADDRESS_REG);
+               writel(0, devpriv->plx9080_iobase + PLX_REG_DMASIZ1);
+               writel(0, devpriv->plx9080_iobase + PLX_REG_DMAPADR1);
+               writel(0, devpriv->plx9080_iobase + PLX_REG_DMALADR1);
                writel(descriptor_bits,
-                      devpriv->plx9080_iobase + PLX_DMA1_DESCRIPTOR_REG);
+                      devpriv->plx9080_iobase + PLX_REG_DMADPR1);
        } else {
-               writel(0,
-                      devpriv->plx9080_iobase + PLX_DMA0_TRANSFER_SIZE_REG);
-               writel(0, devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
-               writel(0,
-                      devpriv->plx9080_iobase + PLX_DMA0_LOCAL_ADDRESS_REG);
+               writel(0, devpriv->plx9080_iobase + PLX_REG_DMASIZ0);
+               writel(0, devpriv->plx9080_iobase + PLX_REG_DMAPADR0);
+               writel(0, devpriv->plx9080_iobase + PLX_REG_DMALADR0);
                writel(descriptor_bits,
-                      devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
+                      devpriv->plx9080_iobase + PLX_REG_DMADPR0);
        }
 }
 
@@ -2643,9 +2634,9 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
                /*  give location of first dma descriptor */
                load_first_dma_descriptor(dev, 1,
                                          devpriv->ai_dma_desc_bus_addr |
-                                         PLX_DESC_IN_PCI_BIT |
-                                         PLX_INTR_TERM_COUNT |
-                                         PLX_XFER_LOCAL_TO_PCI);
+                                         PLX_DMADPR_DESCPCI |
+                                         PLX_DMADPR_TCINTR |
+                                         PLX_DMADPR_XFERL2P);
 
                dma_start_sync(dev, 1);
        }
@@ -2803,12 +2794,7 @@ static void drain_dma_buffers(struct comedi_device *dev, unsigned int channel)
        int num_samples = 0;
        void __iomem *pci_addr_reg;
 
-       if (channel)
-               pci_addr_reg =
-                   devpriv->plx9080_iobase + PLX_DMA1_PCI_ADDRESS_REG;
-       else
-               pci_addr_reg =
-                   devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
+       pci_addr_reg = devpriv->plx9080_iobase + PLX_REG_DMAPADR(channel);
 
        /*  loop until we have read all the full buffers */
        for (j = 0, next_transfer_addr = readl(pci_addr_reg);
@@ -2850,12 +2836,12 @@ static void handle_ai_interrupt(struct comedi_device *dev,
        }
        /*  spin lock makes sure no one else changes plx dma control reg */
        spin_lock_irqsave(&dev->spinlock, flags);
-       dma1_status = readb(devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
-       if (plx_status & ICS_DMA1_A) {  /*  dma chan 1 interrupt */
-               writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
-                      devpriv->plx9080_iobase + PLX_DMA1_CS_REG);
+       dma1_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR1);
+       if (plx_status & PLX_INTCSR_DMA1IA) {   /*  dma chan 1 interrupt */
+               writeb((dma1_status & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
+                      devpriv->plx9080_iobase + PLX_REG_DMACSR1);
 
-               if (dma1_status & PLX_DMA_EN_BIT)
+               if (dma1_status & PLX_DMACSR_ENABLE)
                        drain_dma_buffers(dev, 1);
        }
        spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -2902,12 +2888,12 @@ static int last_ao_dma_load_completed(struct comedi_device *dev)
        unsigned short dma_status;
 
        buffer_index = prev_ao_dma_index(dev);
-       dma_status = readb(devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
-       if ((dma_status & PLX_DMA_DONE_BIT) == 0)
+       dma_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+       if ((dma_status & PLX_DMACSR_DONE) == 0)
                return 0;
 
        transfer_address =
-               readl(devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG);
+               readl(devpriv->plx9080_iobase + PLX_REG_DMAPADR0);
        if (transfer_address != devpriv->ao_buffer_bus_addr[buffer_index])
                return 0;
 
@@ -2917,8 +2903,8 @@ static int last_ao_dma_load_completed(struct comedi_device *dev)
 static inline int ao_dma_needs_restart(struct comedi_device *dev,
                                       unsigned short dma_status)
 {
-       if ((dma_status & PLX_DMA_DONE_BIT) == 0 ||
-           (dma_status & PLX_DMA_EN_BIT) == 0)
+       if ((dma_status & PLX_DMACSR_DONE) == 0 ||
+           (dma_status & PLX_DMACSR_ENABLE) == 0)
                return 0;
        if (last_ao_dma_load_completed(dev))
                return 0;
@@ -2931,9 +2917,8 @@ static void restart_ao_dma(struct comedi_device *dev)
        struct pcidas64_private *devpriv = dev->private;
        unsigned int dma_desc_bits;
 
-       dma_desc_bits =
-               readl(devpriv->plx9080_iobase + PLX_DMA0_DESCRIPTOR_REG);
-       dma_desc_bits &= ~PLX_END_OF_CHAIN_BIT;
+       dma_desc_bits = readl(devpriv->plx9080_iobase + PLX_REG_DMADPR0);
+       dma_desc_bits &= ~PLX_DMADPR_CHAINEND;
        load_first_dma_descriptor(dev, 0, dma_desc_bits);
 
        dma_start_sync(dev, 0);
@@ -2974,14 +2959,14 @@ static unsigned int load_ao_dma_buffer(struct comedi_device *dev,
        devpriv->ao_dma_desc[buffer_index].transfer_size = cpu_to_le32(nbytes);
        /* set end of chain bit so we catch underruns */
        next_bits = le32_to_cpu(devpriv->ao_dma_desc[buffer_index].next);
-       next_bits |= PLX_END_OF_CHAIN_BIT;
+       next_bits |= PLX_DMADPR_CHAINEND;
        devpriv->ao_dma_desc[buffer_index].next = cpu_to_le32(next_bits);
        /*
         * clear end of chain bit on previous buffer now that we have set it
         * for the last buffer
         */
        next_bits = le32_to_cpu(devpriv->ao_dma_desc[prev_buffer_index].next);
-       next_bits &= ~PLX_END_OF_CHAIN_BIT;
+       next_bits &= ~PLX_DMADPR_CHAINEND;
        devpriv->ao_dma_desc[prev_buffer_index].next = cpu_to_le32(next_bits);
 
        devpriv->ao_dma_index = (buffer_index + 1) % AO_DMA_RING_COUNT;
@@ -2994,8 +2979,7 @@ static void load_ao_dma(struct comedi_device *dev, const struct comedi_cmd *cmd)
        struct pcidas64_private *devpriv = dev->private;
        unsigned int num_bytes;
        unsigned int next_transfer_addr;
-       void __iomem *pci_addr_reg =
-               devpriv->plx9080_iobase + PLX_DMA0_PCI_ADDRESS_REG;
+       void __iomem *pci_addr_reg = devpriv->plx9080_iobase + PLX_REG_DMAPADR0;
        unsigned int buffer_index;
 
        do {
@@ -3030,17 +3014,18 @@ static void handle_ao_interrupt(struct comedi_device *dev,
 
        /*  spin lock makes sure no one else changes plx dma control reg */
        spin_lock_irqsave(&dev->spinlock, flags);
-       dma0_status = readb(devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
-       if (plx_status & ICS_DMA0_A) {  /*  dma chan 0 interrupt */
-               if ((dma0_status & PLX_DMA_EN_BIT) &&
-                   !(dma0_status & PLX_DMA_DONE_BIT))
-                       writeb(PLX_DMA_EN_BIT | PLX_CLEAR_DMA_INTR_BIT,
-                              devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
-               else
-                       writeb(PLX_CLEAR_DMA_INTR_BIT,
-                              devpriv->plx9080_iobase + PLX_DMA0_CS_REG);
+       dma0_status = readb(devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+       if (plx_status & PLX_INTCSR_DMA0IA) {   /*  dma chan 0 interrupt */
+               if ((dma0_status & PLX_DMACSR_ENABLE) &&
+                   !(dma0_status & PLX_DMACSR_DONE)) {
+                       writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_CLEARINTR,
+                              devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+               } else {
+                       writeb(PLX_DMACSR_CLEARINTR,
+                              devpriv->plx9080_iobase + PLX_REG_DMACSR0);
+               }
                spin_unlock_irqrestore(&dev->spinlock, flags);
-               if (dma0_status & PLX_DMA_EN_BIT) {
+               if (dma0_status & PLX_DMACSR_ENABLE) {
                        load_ao_dma(dev, cmd);
                        /* try to recover from dma end-of-chain event */
                        if (ao_dma_needs_restart(dev, dma0_status))
@@ -3069,7 +3054,7 @@ static irqreturn_t handle_interrupt(int irq, void *d)
        uint32_t plx_status;
        uint32_t plx_bits;
 
-       plx_status = readl(devpriv->plx9080_iobase + PLX_INTRCS_REG);
+       plx_status = readl(devpriv->plx9080_iobase + PLX_REG_INTCSR);
        status = readw(devpriv->main_iobase + HW_STATUS_REG);
 
        /*
@@ -3083,10 +3068,11 @@ static irqreturn_t handle_interrupt(int irq, void *d)
        handle_ai_interrupt(dev, status, plx_status);
        handle_ao_interrupt(dev, status, plx_status);
 
-       /*  clear possible plx9080 interrupt sources */
-       if (plx_status & ICS_LDIA) {    /*  clear local doorbell interrupt */
-               plx_bits = readl(devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
-               writel(plx_bits, devpriv->plx9080_iobase + PLX_DBR_OUT_REG);
+       /* clear possible plx9080 interrupt sources */
+       if (plx_status & PLX_INTCSR_LDBIA) {
+               /* clear local doorbell interrupt */
+               plx_bits = readl(devpriv->plx9080_iobase + PLX_REG_L2PDBELL);
+               writel(plx_bits, devpriv->plx9080_iobase + PLX_REG_L2PDBELL);
        }
 
        return IRQ_HANDLED;
@@ -3324,7 +3310,7 @@ static int ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
        set_dac_select_reg(dev, cmd);
        set_dac_interval_regs(dev, cmd);
        load_first_dma_descriptor(dev, 0, devpriv->ao_dma_desc_bus_addr |
-                                 PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT);
+                                 PLX_DMADPR_DESCPCI | PLX_DMADPR_TCINTR);
 
        set_dac_control1_reg(dev, cmd);
        s->async->inttrig = ao_inttrig;
@@ -3725,19 +3711,19 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
        unsigned int bitstream = (read_command << 8) | address;
        unsigned int bit;
        void __iomem * const plx_control_addr =
-               devpriv->plx9080_iobase + PLX_CONTROL_REG;
+               devpriv->plx9080_iobase + PLX_REG_CNTRL;
        uint16_t value;
        static const int value_length = 16;
        static const int eeprom_udelay = 1;
 
        udelay(eeprom_udelay);
-       devpriv->plx_control_bits &= ~CTL_EE_CLK & ~CTL_EE_CS;
+       devpriv->plx_control_bits &= ~PLX_CNTRL_EESK & ~PLX_CNTRL_EECS;
        /*  make sure we don't send anything to the i2c bus on 4020 */
-       devpriv->plx_control_bits |= CTL_USERO;
+       devpriv->plx_control_bits |= PLX_CNTRL_USERO;
        writel(devpriv->plx_control_bits, plx_control_addr);
        /*  activate serial eeprom */
        udelay(eeprom_udelay);
-       devpriv->plx_control_bits |= CTL_EE_CS;
+       devpriv->plx_control_bits |= PLX_CNTRL_EECS;
        writel(devpriv->plx_control_bits, plx_control_addr);
 
        /*  write read command and desired memory address */
@@ -3745,16 +3731,16 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
                /*  set bit to be written */
                udelay(eeprom_udelay);
                if (bitstream & bit)
-                       devpriv->plx_control_bits |= CTL_EE_W;
+                       devpriv->plx_control_bits |= PLX_CNTRL_EEWB;
                else
-                       devpriv->plx_control_bits &= ~CTL_EE_W;
+                       devpriv->plx_control_bits &= ~PLX_CNTRL_EEWB;
                writel(devpriv->plx_control_bits, plx_control_addr);
                /*  clock in bit */
                udelay(eeprom_udelay);
-               devpriv->plx_control_bits |= CTL_EE_CLK;
+               devpriv->plx_control_bits |= PLX_CNTRL_EESK;
                writel(devpriv->plx_control_bits, plx_control_addr);
                udelay(eeprom_udelay);
-               devpriv->plx_control_bits &= ~CTL_EE_CLK;
+               devpriv->plx_control_bits &= ~PLX_CNTRL_EESK;
                writel(devpriv->plx_control_bits, plx_control_addr);
        }
        /*  read back value from eeprom memory location */
@@ -3762,19 +3748,19 @@ static uint16_t read_eeprom(struct comedi_device *dev, uint8_t address)
        for (bit = 1 << (value_length - 1); bit; bit >>= 1) {
                /*  clock out bit */
                udelay(eeprom_udelay);
-               devpriv->plx_control_bits |= CTL_EE_CLK;
+               devpriv->plx_control_bits |= PLX_CNTRL_EESK;
                writel(devpriv->plx_control_bits, plx_control_addr);
                udelay(eeprom_udelay);
-               devpriv->plx_control_bits &= ~CTL_EE_CLK;
+               devpriv->plx_control_bits &= ~PLX_CNTRL_EESK;
                writel(devpriv->plx_control_bits, plx_control_addr);
                udelay(eeprom_udelay);
-               if (readl(plx_control_addr) & CTL_EE_R)
+               if (readl(plx_control_addr) & PLX_CNTRL_EERB)
                        value |= bit;
        }
 
        /*  deactivate eeprom serial input */
        udelay(eeprom_udelay);
-       devpriv->plx_control_bits &= ~CTL_EE_CS;
+       devpriv->plx_control_bits &= ~PLX_CNTRL_EECS;
        writel(devpriv->plx_control_bits, plx_control_addr);
 
        return value;
@@ -3962,7 +3948,8 @@ static int setup_subdevices(struct comedi_device *dev)
 
        /* serial EEPROM, if present */
        s = &dev->subdevices[8];
-       if (readl(devpriv->plx9080_iobase + PLX_CONTROL_REG) & CTL_EECHK) {
+       if (readl(devpriv->plx9080_iobase + PLX_REG_CNTRL) &
+           PLX_CNTRL_EEPRESENT) {
                s->type = COMEDI_SUBD_MEMORY;
                s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
                s->n_chan = 128;
@@ -4019,16 +4006,16 @@ static int auto_attach(struct comedi_device *dev,
        }
 
        /*  figure out what local addresses are */
-       local_range = readl(devpriv->plx9080_iobase + PLX_LAS0RNG_REG) &
-                     LRNG_MEM_MASK;
-       local_decode = readl(devpriv->plx9080_iobase + PLX_LAS0MAP_REG) &
-                      local_range & LMAP_MEM_MASK;
+       local_range = readl(devpriv->plx9080_iobase + PLX_REG_LAS0RR) &
+                     PLX_LASRR_MEM_MASK;
+       local_decode = readl(devpriv->plx9080_iobase + PLX_REG_LAS0BA) &
+                      local_range & PLX_LASBA_MEM_MASK;
        devpriv->local0_iobase = ((uint32_t)devpriv->main_phys_iobase &
                                  ~local_range) | local_decode;
-       local_range = readl(devpriv->plx9080_iobase + PLX_LAS1RNG_REG) &
-                     LRNG_MEM_MASK;
-       local_decode = readl(devpriv->plx9080_iobase + PLX_LAS1MAP_REG) &
-                      local_range & LMAP_MEM_MASK;
+       local_range = readl(devpriv->plx9080_iobase + PLX_REG_LAS1RR) &
+                     PLX_LASRR_MEM_MASK;
+       local_decode = readl(devpriv->plx9080_iobase + PLX_REG_LAS1BA) &
+                      local_range & PLX_LASBA_MEM_MASK;
        devpriv->local1_iobase = ((uint32_t)devpriv->dio_counter_phys_iobase &
                                  ~local_range) | local_decode;
 
index 50b76eccb7d708cfc112e523f0d633440264a031..64a5ea3810d4f7b38a4871fea3f0ee5df78c1aa6 100644 (file)
 
 struct bonded_device {
        struct comedi_device *dev;
-       unsigned minor;
-       unsigned subdev;
-       unsigned nchans;
+       unsigned int minor;
+       unsigned int subdev;
+       unsigned int nchans;
 };
 
 struct comedi_bond_private {
        char name[256];
        struct bonded_device **devs;
-       unsigned ndevs;
-       unsigned nchans;
+       unsigned int ndevs;
+       unsigned int nchans;
 };
 
 static int bonding_dio_insn_bits(struct comedi_device *dev,
index a536a15c1d30763dd4040ce8d1b41d1158f5da68..65daef0c00d5209c61e2a59d20338ed2afe7c198 100644 (file)
 #define DAQBOARD2000_SUBSYSTEM_IDS4    0x0004  /* Daqboard/2000 - 4 Dacs */
 
 /* Initialization bits for the Serial EEPROM Control Register */
-#define DAQBOARD2000_SECRProgPinHi      0x8001767e
-#define DAQBOARD2000_SECRProgPinLo      0x8000767e
-#define DAQBOARD2000_SECRLocalBusHi     0xc000767e
-#define DAQBOARD2000_SECRLocalBusLo     0x8000767e
-#define DAQBOARD2000_SECRReloadHi       0xa000767e
-#define DAQBOARD2000_SECRReloadLo       0x8000767e
+#define DB2K_SECR_PROG_PIN_HI          0x8001767e
+#define DB2K_SECR_PROG_PIN_LO          0x8000767e
+#define DB2K_SECR_LOCAL_BUS_HI         0xc000767e
+#define DB2K_SECR_LOCAL_BUS_LO         0x8000767e
+#define DB2K_SECR_RELOAD_HI            0xa000767e
+#define DB2K_SECR_RELOAD_LO            0x8000767e
 
 /* SECR status bits */
 #define DAQBOARD2000_EEPROM_PRESENT     0x10000000
@@ -151,119 +151,108 @@ static const struct comedi_lrange range_daqboard2000_ai = {
 /*
  * Register Memory Map
  */
-#define acqControl                     0x00            /* u16 */
-#define acqScanListFIFO                        0x02            /* u16 */
-#define acqPacerClockDivLow            0x04            /* u32 */
-#define acqScanCounter                 0x08            /* u16 */
-#define acqPacerClockDivHigh           0x0a            /* u16 */
-#define acqTriggerCount                        0x0c            /* u16 */
-#define acqResultsFIFO                 0x10            /* u16 */
-#define acqResultsShadow               0x14            /* u16 */
-#define acqAdcResult                   0x18            /* u16 */
-#define dacScanCounter                 0x1c            /* u16 */
-#define dacControl                     0x20            /* u16 */
-#define dacFIFO                                0x24            /* s16 */
-#define dacPacerClockDiv               0x2a            /* u16 */
-#define refDacs                                0x2c            /* u16 */
-#define dioControl                     0x30            /* u16 */
-#define dioP3hsioData                  0x32            /* s16 */
-#define dioP3Control                   0x34            /* u16 */
-#define calEepromControl               0x36            /* u16 */
-#define dacSetting(x)                  (0x38 + (x)*2)  /* s16 */
-#define dioP2ExpansionIO8Bit           0x40            /* s16 */
-#define ctrTmrControl                  0x80            /* u16 */
-#define ctrInput(x)                    (0x88 + (x)*2)  /* s16 */
-#define timerDivisor(x)                        (0xa0 + (x)*2)  /* u16 */
-#define dmaControl                     0xb0            /* u16 */
-#define trigControl                    0xb2            /* u16 */
-#define calEeprom                      0xb8            /* u16 */
-#define acqDigitalMark                 0xba            /* u16 */
-#define trigDacs                       0xbc            /* u16 */
-#define dioP2ExpansionIO16Bit(x)       (0xc0 + (x)*2)  /* s16 */
+#define DB2K_REG_ACQ_CONTROL                   0x00            /* u16 (w) */
+#define DB2K_REG_ACQ_STATUS                    0x00            /* u16 (r) */
+#define DB2K_REG_ACQ_SCAN_LIST_FIFO            0x02            /* u16 */
+#define DB2K_REG_ACQ_PACER_CLOCK_DIV_LOW       0x04            /* u32 */
+#define DB2K_REG_ACQ_SCAN_COUNTER              0x08            /* u16 */
+#define DB2K_REG_ACQ_PACER_CLOCK_DIV_HIGH      0x0a            /* u16 */
+#define DB2K_REG_ACQ_TRIGGER_COUNT             0x0c            /* u16 */
+#define DB2K_REG_ACQ_RESULTS_FIFO              0x10            /* u16 */
+#define DB2K_REG_ACQ_RESULTS_SHADOW            0x14            /* u16 */
+#define DB2K_REG_ACQ_ADC_RESULT                        0x18            /* u16 */
+#define DB2K_REG_DAC_SCAN_COUNTER              0x1c            /* u16 */
+#define DB2K_REG_DAC_CONTROL                   0x20            /* u16 (w) */
+#define DB2K_REG_DAC_STATUS                    0x20            /* u16 (r) */
+#define DB2K_REG_DAC_FIFO                      0x24            /* s16 */
+#define DB2K_REG_DAC_PACER_CLOCK_DIV           0x2a            /* u16 */
+#define DB2K_REG_REF_DACS                      0x2c            /* u16 */
+#define DB2K_REG_DIO_CONTROL                   0x30            /* u16 */
+#define DB2K_REG_P3_HSIO_DATA                  0x32            /* s16 */
+#define DB2K_REG_P3_CONTROL                    0x34            /* u16 */
+#define DB2K_REG_CAL_EEPROM_CONTROL            0x36            /* u16 */
+#define DB2K_REG_DAC_SETTING(x)                        (0x38 + (x) * 2) /* s16 */
+#define DB2K_REG_DIO_P2_EXP_IO_8_BIT           0x40            /* s16 */
+#define DB2K_REG_COUNTER_TIMER_CONTROL         0x80            /* u16 */
+#define DB2K_REG_COUNTER_INPUT(x)              (0x88 + (x) * 2) /* s16 */
+#define DB2K_REG_TIMER_DIV(x)                  (0xa0 + (x) * 2) /* u16 */
+#define DB2K_REG_DMA_CONTROL                   0xb0            /* u16 */
+#define DB2K_REG_TRIG_CONTROL                  0xb2            /* u16 */
+#define DB2K_REG_CAL_EEPROM                    0xb8            /* u16 */
+#define DB2K_REG_ACQ_DIGITAL_MARK              0xba            /* u16 */
+#define DB2K_REG_TRIG_DACS                     0xbc            /* u16 */
+#define DB2K_REG_DIO_P2_EXP_IO_16_BIT(x)       (0xc0 + (x) * 2) /* s16 */
 
 /* Scan Sequencer programming */
-#define DAQBOARD2000_SeqStartScanList            0x0011
-#define DAQBOARD2000_SeqStopScanList             0x0010
+#define DB2K_ACQ_CONTROL_SEQ_START_SCAN_LIST           0x0011
+#define DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST            0x0010
 
 /* Prepare for acquisition */
-#define DAQBOARD2000_AcqResetScanListFifo        0x0004
-#define DAQBOARD2000_AcqResetResultsFifo         0x0002
-#define DAQBOARD2000_AcqResetConfigPipe          0x0001
-
-/* Acqusition status bits */
-#define DAQBOARD2000_AcqResultsFIFOMore1Sample   0x0001
-#define DAQBOARD2000_AcqResultsFIFOHasValidData  0x0002
-#define DAQBOARD2000_AcqResultsFIFOOverrun       0x0004
-#define DAQBOARD2000_AcqLogicScanning            0x0008
-#define DAQBOARD2000_AcqConfigPipeFull           0x0010
-#define DAQBOARD2000_AcqScanListFIFOEmpty        0x0020
-#define DAQBOARD2000_AcqAdcNotReady              0x0040
-#define DAQBOARD2000_ArbitrationFailure          0x0080
-#define DAQBOARD2000_AcqPacerOverrun             0x0100
-#define DAQBOARD2000_DacPacerOverrun             0x0200
-#define DAQBOARD2000_AcqHardwareError            0x01c0
-
-/* Scan Sequencer programming */
-#define DAQBOARD2000_SeqStartScanList            0x0011
-#define DAQBOARD2000_SeqStopScanList             0x0010
+#define DB2K_ACQ_CONTROL_RESET_SCAN_LIST_FIFO          0x0004
+#define DB2K_ACQ_CONTROL_RESET_RESULTS_FIFO            0x0002
+#define DB2K_ACQ_CONTROL_RESET_CONFIG_PIPE             0x0001
 
 /* Pacer Clock Control */
-#define DAQBOARD2000_AdcPacerInternal            0x0030
-#define DAQBOARD2000_AdcPacerExternal            0x0032
-#define DAQBOARD2000_AdcPacerEnable              0x0031
-#define DAQBOARD2000_AdcPacerEnableDacPacer      0x0034
-#define DAQBOARD2000_AdcPacerDisable             0x0030
-#define DAQBOARD2000_AdcPacerNormalMode          0x0060
-#define DAQBOARD2000_AdcPacerCompatibilityMode   0x0061
-#define DAQBOARD2000_AdcPacerInternalOutEnable   0x0008
-#define DAQBOARD2000_AdcPacerExternalRising      0x0100
+#define DB2K_ACQ_CONTROL_ADC_PACER_INTERNAL            0x0030
+#define DB2K_ACQ_CONTROL_ADC_PACER_EXTERNAL            0x0032
+#define DB2K_ACQ_CONTROL_ADC_PACER_ENABLE              0x0031
+#define DB2K_ACQ_CONTROL_ADC_PACER_ENABLE_DAC_PACER    0x0034
+#define DB2K_ACQ_CONTROL_ADC_PACER_DISABLE             0x0030
+#define DB2K_ACQ_CONTROL_ADC_PACER_NORMAL_MODE         0x0060
+#define DB2K_ACQ_CONTROL_ADC_PACER_COMPATIBILITY_MODE  0x0061
+#define DB2K_ACQ_CONTROL_ADC_PACER_INTERNAL_OUT_ENABLE 0x0008
+#define DB2K_ACQ_CONTROL_ADC_PACER_EXTERNAL_RISING     0x0100
+
+/* Acquisition status bits */
+#define DB2K_ACQ_STATUS_RESULTS_FIFO_MORE_1_SAMPLE     0x0001
+#define DB2K_ACQ_STATUS_RESULTS_FIFO_HAS_DATA          0x0002
+#define DB2K_ACQ_STATUS_RESULTS_FIFO_OVERRUN           0x0004
+#define DB2K_ACQ_STATUS_LOGIC_SCANNING                 0x0008
+#define DB2K_ACQ_STATUS_CONFIG_PIPE_FULL               0x0010
+#define DB2K_ACQ_STATUS_SCAN_LIST_FIFO_EMPTY           0x0020
+#define DB2K_ACQ_STATUS_ADC_NOT_READY                  0x0040
+#define DB2K_ACQ_STATUS_ARBITRATION_FAILURE            0x0080
+#define DB2K_ACQ_STATUS_ADC_PACER_OVERRUN              0x0100
+#define DB2K_ACQ_STATUS_DAC_PACER_OVERRUN              0x0200
 
 /* DAC status */
-#define DAQBOARD2000_DacFull                     0x0001
-#define DAQBOARD2000_RefBusy                     0x0002
-#define DAQBOARD2000_TrgBusy                     0x0004
-#define DAQBOARD2000_CalBusy                     0x0008
-#define DAQBOARD2000_Dac0Busy                    0x0010
-#define DAQBOARD2000_Dac1Busy                    0x0020
-#define DAQBOARD2000_Dac2Busy                    0x0040
-#define DAQBOARD2000_Dac3Busy                    0x0080
+#define DB2K_DAC_STATUS_DAC_FULL                       0x0001
+#define DB2K_DAC_STATUS_REF_BUSY                       0x0002
+#define DB2K_DAC_STATUS_TRIG_BUSY                      0x0004
+#define DB2K_DAC_STATUS_CAL_BUSY                       0x0008
+#define DB2K_DAC_STATUS_DAC_BUSY(x)                    (0x0010 << (x))
 
 /* DAC control */
-#define DAQBOARD2000_Dac0Enable                  0x0021
-#define DAQBOARD2000_Dac1Enable                  0x0031
-#define DAQBOARD2000_Dac2Enable                  0x0041
-#define DAQBOARD2000_Dac3Enable                  0x0051
-#define DAQBOARD2000_DacEnableBit                0x0001
-#define DAQBOARD2000_Dac0Disable                 0x0020
-#define DAQBOARD2000_Dac1Disable                 0x0030
-#define DAQBOARD2000_Dac2Disable                 0x0040
-#define DAQBOARD2000_Dac3Disable                 0x0050
-#define DAQBOARD2000_DacResetFifo                0x0004
-#define DAQBOARD2000_DacPatternDisable           0x0060
-#define DAQBOARD2000_DacPatternEnable            0x0061
-#define DAQBOARD2000_DacSelectSignedData         0x0002
-#define DAQBOARD2000_DacSelectUnsignedData       0x0000
+#define DB2K_DAC_CONTROL_ENABLE_BIT                    0x0001
+#define DB2K_DAC_CONTROL_DATA_IS_SIGNED                        0x0002
+#define DB2K_DAC_CONTROL_RESET_FIFO                    0x0004
+#define DB2K_DAC_CONTROL_DAC_DISABLE(x)                        (0x0020 + ((x) << 4))
+#define DB2K_DAC_CONTROL_DAC_ENABLE(x)                 (0x0021 + ((x) << 4))
+#define DB2K_DAC_CONTROL_PATTERN_DISABLE               0x0060
+#define DB2K_DAC_CONTROL_PATTERN_ENABLE                        0x0061
 
 /* Trigger Control */
-#define DAQBOARD2000_TrigAnalog                  0x0000
-#define DAQBOARD2000_TrigTTL                     0x0010
-#define DAQBOARD2000_TrigTransHiLo               0x0004
-#define DAQBOARD2000_TrigTransLoHi               0x0000
-#define DAQBOARD2000_TrigAbove                   0x0000
-#define DAQBOARD2000_TrigBelow                   0x0004
-#define DAQBOARD2000_TrigLevelSense              0x0002
-#define DAQBOARD2000_TrigEdgeSense               0x0000
-#define DAQBOARD2000_TrigEnable                  0x0001
-#define DAQBOARD2000_TrigDisable                 0x0000
+#define DB2K_TRIG_CONTROL_TYPE_ANALOG                  0x0000
+#define DB2K_TRIG_CONTROL_TYPE_TTL                     0x0010
+#define DB2K_TRIG_CONTROL_EDGE_HI_LO                   0x0004
+#define DB2K_TRIG_CONTROL_EDGE_LO_HI                   0x0000
+#define DB2K_TRIG_CONTROL_LEVEL_ABOVE                  0x0000
+#define DB2K_TRIG_CONTROL_LEVEL_BELOW                  0x0004
+#define DB2K_TRIG_CONTROL_SENSE_LEVEL                  0x0002
+#define DB2K_TRIG_CONTROL_SENSE_EDGE                   0x0000
+#define DB2K_TRIG_CONTROL_ENABLE                       0x0001
+#define DB2K_TRIG_CONTROL_DISABLE                      0x0000
 
 /* Reference Dac Selection */
-#define DAQBOARD2000_PosRefDacSelect             0x0100
-#define DAQBOARD2000_NegRefDacSelect             0x0000
+#define DB2K_REF_DACS_SET                              0x0080
+#define DB2K_REF_DACS_SELECT_POS_REF                   0x0100
+#define DB2K_REF_DACS_SELECT_NEG_REF                   0x0000
 
 struct daq200_boardtype {
        const char *name;
        int id;
 };
+
 static const struct daq200_boardtype boardtypes[] = {
        {"ids2", DAQBOARD2000_SUBSYSTEM_IDS2},
        {"ids4", DAQBOARD2000_SUBSYSTEM_IDS4},
@@ -276,15 +265,16 @@ struct daqboard2000_private {
        void __iomem *plx;
 };
 
-static void writeAcqScanListEntry(struct comedi_device *dev, u16 entry)
+static void daqboard2000_write_acq_scan_list_entry(struct comedi_device *dev,
+                                                  u16 entry)
 {
-       /* udelay(4); */
-       writew(entry & 0x00ff, dev->mmio + acqScanListFIFO);
-       /* udelay(4); */
-       writew((entry >> 8) & 0x00ff, dev->mmio + acqScanListFIFO);
+       writew(entry & 0x00ff, dev->mmio + DB2K_REG_ACQ_SCAN_LIST_FIFO);
+       writew((entry >> 8) & 0x00ff,
+              dev->mmio + DB2K_REG_ACQ_SCAN_LIST_FIFO);
 }
 
-static void setup_sampling(struct comedi_device *dev, int chan, int gain)
+static void daqboard2000_setup_sampling(struct comedi_device *dev, int chan,
+                                       int gain)
 {
        u16 word0, word1, word2, word3;
 
@@ -315,17 +305,13 @@ static void setup_sampling(struct comedi_device *dev, int chan, int gain)
                word3 = 0;
                break;
        }
-/*
-  dev->eeprom.correctionDACSE[i][j][k].offset = 0x800;
-  dev->eeprom.correctionDACSE[i][j][k].gain = 0xc00;
-*/
        /* These should be read from EEPROM */
-       word2 |= 0x0800;
-       word3 |= 0xc000;
-       writeAcqScanListEntry(dev, word0);
-       writeAcqScanListEntry(dev, word1);
-       writeAcqScanListEntry(dev, word2);
-       writeAcqScanListEntry(dev, word3);
+       word2 |= 0x0800;        /* offset */
+       word3 |= 0xc000;        /* gain */
+       daqboard2000_write_acq_scan_list_entry(dev, word0);
+       daqboard2000_write_acq_scan_list_entry(dev, word1);
+       daqboard2000_write_acq_scan_list_entry(dev, word2);
+       daqboard2000_write_acq_scan_list_entry(dev, word3);
 }
 
 static int daqboard2000_ai_status(struct comedi_device *dev,
@@ -335,7 +321,7 @@ static int daqboard2000_ai_status(struct comedi_device *dev,
 {
        unsigned int status;
 
-       status = readw(dev->mmio + acqControl);
+       status = readw(dev->mmio + DB2K_REG_ACQ_STATUS);
        if (status & context)
                return 0;
        return -EBUSY;
@@ -350,50 +336,58 @@ static int daqboard2000_ai_insn_read(struct comedi_device *dev,
        int ret;
        int i;
 
-       writew(DAQBOARD2000_AcqResetScanListFifo |
-              DAQBOARD2000_AcqResetResultsFifo |
-              DAQBOARD2000_AcqResetConfigPipe, dev->mmio + acqControl);
+       writew(DB2K_ACQ_CONTROL_RESET_SCAN_LIST_FIFO |
+              DB2K_ACQ_CONTROL_RESET_RESULTS_FIFO |
+              DB2K_ACQ_CONTROL_RESET_CONFIG_PIPE,
+              dev->mmio + DB2K_REG_ACQ_CONTROL);
 
        /*
         * If pacer clock is not set to some high value (> 10 us), we
         * risk multiple samples to be put into the result FIFO.
         */
        /* 1 second, should be long enough */
-       writel(1000000, dev->mmio + acqPacerClockDivLow);
-       writew(0, dev->mmio + acqPacerClockDivHigh);
+       writel(1000000, dev->mmio + DB2K_REG_ACQ_PACER_CLOCK_DIV_LOW);
+       writew(0, dev->mmio + DB2K_REG_ACQ_PACER_CLOCK_DIV_HIGH);
 
        gain = CR_RANGE(insn->chanspec);
        chan = CR_CHAN(insn->chanspec);
 
-       /* This doesn't look efficient.  I decided to take the conservative
+       /*
+        * This doesn't look efficient.  I decided to take the conservative
         * approach when I did the insn conversion.  Perhaps it would be
         * better to have broken it completely, then someone would have been
-        * forced to fix it.  --ds */
+        * forced to fix it.  --ds
+        */
        for (i = 0; i < insn->n; i++) {
-               setup_sampling(dev, chan, gain);
+               daqboard2000_setup_sampling(dev, chan, gain);
                /* Enable reading from the scanlist FIFO */
-               writew(DAQBOARD2000_SeqStartScanList, dev->mmio + acqControl);
+               writew(DB2K_ACQ_CONTROL_SEQ_START_SCAN_LIST,
+                      dev->mmio + DB2K_REG_ACQ_CONTROL);
 
                ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
-                                    DAQBOARD2000_AcqConfigPipeFull);
+                                    DB2K_ACQ_STATUS_CONFIG_PIPE_FULL);
                if (ret)
                        return ret;
 
-               writew(DAQBOARD2000_AdcPacerEnable, dev->mmio + acqControl);
+               writew(DB2K_ACQ_CONTROL_ADC_PACER_ENABLE,
+                      dev->mmio + DB2K_REG_ACQ_CONTROL);
 
                ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
-                                    DAQBOARD2000_AcqLogicScanning);
+                                    DB2K_ACQ_STATUS_LOGIC_SCANNING);
                if (ret)
                        return ret;
 
-               ret = comedi_timeout(dev, s, insn, daqboard2000_ai_status,
-                                    DAQBOARD2000_AcqResultsFIFOHasValidData);
+               ret =
+               comedi_timeout(dev, s, insn, daqboard2000_ai_status,
+                              DB2K_ACQ_STATUS_RESULTS_FIFO_HAS_DATA);
                if (ret)
                        return ret;
 
-               data[i] = readw(dev->mmio + acqResultsFIFO);
-               writew(DAQBOARD2000_AdcPacerDisable, dev->mmio + acqControl);
-               writew(DAQBOARD2000_SeqStopScanList, dev->mmio + acqControl);
+               data[i] = readw(dev->mmio + DB2K_REG_ACQ_RESULTS_FIFO);
+               writew(DB2K_ACQ_CONTROL_ADC_PACER_DISABLE,
+                      dev->mmio + DB2K_REG_ACQ_CONTROL);
+               writew(DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST,
+                      dev->mmio + DB2K_REG_ACQ_CONTROL);
        }
 
        return i;
@@ -407,8 +401,8 @@ static int daqboard2000_ao_eoc(struct comedi_device *dev,
        unsigned int chan = CR_CHAN(insn->chanspec);
        unsigned int status;
 
-       status = readw(dev->mmio + dacControl);
-       if ((status & ((chan + 1) * 0x0010)) == 0)
+       status = readw(dev->mmio + DB2K_REG_DAC_STATUS);
+       if ((status & DB2K_DAC_STATUS_DAC_BUSY(chan)) == 0)
                return 0;
        return -EBUSY;
 }
@@ -425,7 +419,7 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
                unsigned int val = data[i];
                int ret;
 
-               writew(val, dev->mmio + dacSetting(chan));
+               writew(val, dev->mmio + DB2K_REG_DAC_SETTING(chan));
 
                ret = comedi_timeout(dev, s, insn, daqboard2000_ao_eoc, 0);
                if (ret)
@@ -437,39 +431,39 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
        return insn->n;
 }
 
-static void daqboard2000_resetLocalBus(struct comedi_device *dev)
+static void daqboard2000_reset_local_bus(struct comedi_device *dev)
 {
        struct daqboard2000_private *devpriv = dev->private;
 
-       writel(DAQBOARD2000_SECRLocalBusHi, devpriv->plx + 0x6c);
+       writel(DB2K_SECR_LOCAL_BUS_HI, devpriv->plx + 0x6c);
        mdelay(10);
-       writel(DAQBOARD2000_SECRLocalBusLo, devpriv->plx + 0x6c);
+       writel(DB2K_SECR_LOCAL_BUS_LO, devpriv->plx + 0x6c);
        mdelay(10);
 }
 
-static void daqboard2000_reloadPLX(struct comedi_device *dev)
+static void daqboard2000_reload_plx(struct comedi_device *dev)
 {
        struct daqboard2000_private *devpriv = dev->private;
 
-       writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
+       writel(DB2K_SECR_RELOAD_LO, devpriv->plx + 0x6c);
        mdelay(10);
-       writel(DAQBOARD2000_SECRReloadHi, devpriv->plx + 0x6c);
+       writel(DB2K_SECR_RELOAD_HI, devpriv->plx + 0x6c);
        mdelay(10);
-       writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
+       writel(DB2K_SECR_RELOAD_LO, devpriv->plx + 0x6c);
        mdelay(10);
 }
 
-static void daqboard2000_pulseProgPin(struct comedi_device *dev)
+static void daqboard2000_pulse_prog_pin(struct comedi_device *dev)
 {
        struct daqboard2000_private *devpriv = dev->private;
 
-       writel(DAQBOARD2000_SECRProgPinHi, devpriv->plx + 0x6c);
+       writel(DB2K_SECR_PROG_PIN_HI, devpriv->plx + 0x6c);
        mdelay(10);
-       writel(DAQBOARD2000_SECRProgPinLo, devpriv->plx + 0x6c);
+       writel(DB2K_SECR_PROG_PIN_LO, devpriv->plx + 0x6c);
        mdelay(10);     /* Not in the original code, but I like symmetry... */
 }
 
-static int daqboard2000_pollCPLD(struct comedi_device *dev, int mask)
+static int daqboard2000_poll_cpld(struct comedi_device *dev, int mask)
 {
        int result = 0;
        int i;
@@ -482,17 +476,17 @@ static int daqboard2000_pollCPLD(struct comedi_device *dev, int mask)
                        result = 1;
                        break;
                }
-               udelay(100);
+               usleep_range(100, 1000);
        }
        udelay(5);
        return result;
 }
 
-static int daqboard2000_writeCPLD(struct comedi_device *dev, int data)
+static int daqboard2000_write_cpld(struct comedi_device *dev, int data)
 {
        int result = 0;
 
-       udelay(10);
+       usleep_range(10, 20);
        writew(data, dev->mmio + 0x1000);
        if ((readw(dev->mmio + 0x1000) & DAQBOARD2000_CPLD_INIT) ==
            DAQBOARD2000_CPLD_INIT) {
@@ -501,9 +495,9 @@ static int daqboard2000_writeCPLD(struct comedi_device *dev, int data)
        return result;
 }
 
-static int initialize_daqboard2000(struct comedi_device *dev,
-                                  const u8 *cpld_array, size_t len,
-                                  unsigned long context)
+static int daqboard2000_load_firmware(struct comedi_device *dev,
+                                     const u8 *cpld_array, size_t len,
+                                     unsigned long context)
 {
        struct daqboard2000_private *devpriv = dev->private;
        int result = -EIO;
@@ -518,10 +512,10 @@ static int initialize_daqboard2000(struct comedi_device *dev,
                return -EIO;
 
        for (retry = 0; retry < 3; retry++) {
-               daqboard2000_resetLocalBus(dev);
-               daqboard2000_reloadPLX(dev);
-               daqboard2000_pulseProgPin(dev);
-               if (daqboard2000_pollCPLD(dev, DAQBOARD2000_CPLD_INIT)) {
+               daqboard2000_reset_local_bus(dev);
+               daqboard2000_reload_plx(dev);
+               daqboard2000_pulse_prog_pin(dev);
+               if (daqboard2000_poll_cpld(dev, DAQBOARD2000_CPLD_INIT)) {
                        for (i = 0; i < len; i++) {
                                if (cpld_array[i] == 0xff &&
                                    cpld_array[i + 1] == 0x20)
@@ -530,12 +524,12 @@ static int initialize_daqboard2000(struct comedi_device *dev,
                        for (; i < len; i += 2) {
                                int data =
                                    (cpld_array[i] << 8) + cpld_array[i + 1];
-                               if (!daqboard2000_writeCPLD(dev, data))
+                               if (!daqboard2000_write_cpld(dev, data))
                                        break;
                        }
                        if (i >= len) {
-                               daqboard2000_resetLocalBus(dev);
-                               daqboard2000_reloadPLX(dev);
+                               daqboard2000_reset_local_bus(dev);
+                               daqboard2000_reload_plx(dev);
                                result = 0;
                                break;
                        }
@@ -544,79 +538,83 @@ static int initialize_daqboard2000(struct comedi_device *dev,
        return result;
 }
 
-static void daqboard2000_adcStopDmaTransfer(struct comedi_device *dev)
+static void daqboard2000_adc_stop_dma_transfer(struct comedi_device *dev)
 {
 }
 
-static void daqboard2000_adcDisarm(struct comedi_device *dev)
+static void daqboard2000_adc_disarm(struct comedi_device *dev)
 {
        /* Disable hardware triggers */
        udelay(2);
-       writew(DAQBOARD2000_TrigAnalog | DAQBOARD2000_TrigDisable,
-              dev->mmio + trigControl);
+       writew(DB2K_TRIG_CONTROL_TYPE_ANALOG | DB2K_TRIG_CONTROL_DISABLE,
+              dev->mmio + DB2K_REG_TRIG_CONTROL);
        udelay(2);
-       writew(DAQBOARD2000_TrigTTL | DAQBOARD2000_TrigDisable,
-              dev->mmio + trigControl);
+       writew(DB2K_TRIG_CONTROL_TYPE_TTL | DB2K_TRIG_CONTROL_DISABLE,
+              dev->mmio + DB2K_REG_TRIG_CONTROL);
 
        /* Stop the scan list FIFO from loading the configuration pipe */
        udelay(2);
-       writew(DAQBOARD2000_SeqStopScanList, dev->mmio + acqControl);
+       writew(DB2K_ACQ_CONTROL_SEQ_STOP_SCAN_LIST,
+              dev->mmio + DB2K_REG_ACQ_CONTROL);
 
        /* Stop the pacer clock */
        udelay(2);
-       writew(DAQBOARD2000_AdcPacerDisable, dev->mmio + acqControl);
+       writew(DB2K_ACQ_CONTROL_ADC_PACER_DISABLE,
+              dev->mmio + DB2K_REG_ACQ_CONTROL);
 
        /* Stop the input dma (abort channel 1) */
-       daqboard2000_adcStopDmaTransfer(dev);
+       daqboard2000_adc_stop_dma_transfer(dev);
 }
 
-static void daqboard2000_activateReferenceDacs(struct comedi_device *dev)
+static void daqboard2000_activate_reference_dacs(struct comedi_device *dev)
 {
        unsigned int val;
        int timeout;
 
        /*  Set the + reference dac value in the FPGA */
-       writew(0x80 | DAQBOARD2000_PosRefDacSelect, dev->mmio + refDacs);
+       writew(DB2K_REF_DACS_SET | DB2K_REF_DACS_SELECT_POS_REF,
+              dev->mmio + DB2K_REG_REF_DACS);
        for (timeout = 0; timeout < 20; timeout++) {
-               val = readw(dev->mmio + dacControl);
-               if ((val & DAQBOARD2000_RefBusy) == 0)
+               val = readw(dev->mmio + DB2K_REG_DAC_STATUS);
+               if ((val & DB2K_DAC_STATUS_REF_BUSY) == 0)
                        break;
                udelay(2);
        }
 
        /*  Set the - reference dac value in the FPGA */
-       writew(0x80 | DAQBOARD2000_NegRefDacSelect, dev->mmio + refDacs);
+       writew(DB2K_REF_DACS_SET | DB2K_REF_DACS_SELECT_NEG_REF,
+              dev->mmio + DB2K_REG_REF_DACS);
        for (timeout = 0; timeout < 20; timeout++) {
-               val = readw(dev->mmio + dacControl);
-               if ((val & DAQBOARD2000_RefBusy) == 0)
+               val = readw(dev->mmio + DB2K_REG_DAC_STATUS);
+               if ((val & DB2K_DAC_STATUS_REF_BUSY) == 0)
                        break;
                udelay(2);
        }
 }
 
-static void daqboard2000_initializeCtrs(struct comedi_device *dev)
+static void daqboard2000_initialize_ctrs(struct comedi_device *dev)
 {
 }
 
-static void daqboard2000_initializeTmrs(struct comedi_device *dev)
+static void daqboard2000_initialize_tmrs(struct comedi_device *dev)
 {
 }
 
-static void daqboard2000_dacDisarm(struct comedi_device *dev)
+static void daqboard2000_dac_disarm(struct comedi_device *dev)
 {
 }
 
-static void daqboard2000_initializeAdc(struct comedi_device *dev)
+static void daqboard2000_initialize_adc(struct comedi_device *dev)
 {
-       daqboard2000_adcDisarm(dev);
-       daqboard2000_activateReferenceDacs(dev);
-       daqboard2000_initializeCtrs(dev);
-       daqboard2000_initializeTmrs(dev);
+       daqboard2000_adc_disarm(dev);
+       daqboard2000_activate_reference_dacs(dev);
+       daqboard2000_initialize_ctrs(dev);
+       daqboard2000_initialize_tmrs(dev);
 }
 
-static void daqboard2000_initializeDac(struct comedi_device *dev)
+static void daqboard2000_initialize_dac(struct comedi_device *dev)
 {
-       daqboard2000_dacDisarm(dev);
+       daqboard2000_dac_disarm(dev);
 }
 
 static int daqboard2000_8255_cb(struct comedi_device *dev,
@@ -683,12 +681,12 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
 
        result = comedi_load_firmware(dev, &comedi_to_pci_dev(dev)->dev,
                                      DAQBOARD2000_FIRMWARE,
-                                     initialize_daqboard2000, 0);
+                                     daqboard2000_load_firmware, 0);
        if (result < 0)
                return result;
 
-       daqboard2000_initializeAdc(dev);
-       daqboard2000_initializeDac(dev);
+       daqboard2000_initialize_adc(dev);
+       daqboard2000_initialize_dac(dev);
 
        s = &dev->subdevices[0];
        /* ai subdevice */
@@ -714,7 +712,7 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
 
        s = &dev->subdevices[2];
        return subdev_8255_init(dev, s, daqboard2000_8255_cb,
-                               dioP2ExpansionIO8Bit);
+                               DB2K_REG_DIO_P2_EXP_IO_8_BIT);
 }
 
 static void daqboard2000_detach(struct comedi_device *dev)
index fd8e0b76f7646362f2ba90a2ca9760f79d0a4069..5d157951f63fa8d9315b5e3f17afec4abdae9422 100644 (file)
 #define DAS16_AO_LSB_REG(x)            ((x) ? 0x06 : 0x04)
 #define DAS16_AO_MSB_REG(x)            ((x) ? 0x07 : 0x05)
 #define DAS16_STATUS_REG               0x08
-#define DAS16_STATUS_BUSY              (1 << 7)
-#define DAS16_STATUS_UNIPOLAR          (1 << 6)
-#define DAS16_STATUS_MUXBIT            (1 << 5)
-#define DAS16_STATUS_INT               (1 << 4)
+#define DAS16_STATUS_BUSY              BIT(7)
+#define DAS16_STATUS_UNIPOLAR          BIT(6)
+#define DAS16_STATUS_MUXBIT            BIT(5)
+#define DAS16_STATUS_INT               BIT(4)
 #define DAS16_CTRL_REG                 0x09
-#define DAS16_CTRL_INTE                        (1 << 7)
+#define DAS16_CTRL_INTE                        BIT(7)
 #define DAS16_CTRL_IRQ(x)              (((x) & 0x7) << 4)
-#define DAS16_CTRL_DMAE                        (1 << 2)
+#define DAS16_CTRL_DMAE                        BIT(2)
 #define DAS16_CTRL_PACING_MASK         (3 << 0)
 #define DAS16_CTRL_INT_PACER           (3 << 0)
 #define DAS16_CTRL_EXT_PACER           (2 << 0)
 #define DAS16_CTRL_SOFT_PACER          (0 << 0)
 #define DAS16_PACER_REG                        0x0a
 #define DAS16_PACER_BURST_LEN(x)       (((x) & 0xf) << 4)
-#define DAS16_PACER_CTR0               (1 << 1)
-#define DAS16_PACER_TRIG0              (1 << 0)
+#define DAS16_PACER_CTR0               BIT(1)
+#define DAS16_PACER_TRIG0              BIT(0)
 #define DAS16_GAIN_REG                 0x0b
 #define DAS16_TIMER_BASE_REG           0x0c    /* to 0x0f */
 
 #define DAS1600_CONV_REG               0x404
-#define DAS1600_CONV_DISABLE           (1 << 6)
+#define DAS1600_CONV_DISABLE           BIT(6)
 #define DAS1600_BURST_REG              0x405
-#define DAS1600_BURST_VAL              (1 << 6)
+#define DAS1600_BURST_VAL              BIT(6)
 #define DAS1600_ENABLE_REG             0x406
-#define DAS1600_ENABLE_VAL             (1 << 6)
+#define DAS1600_ENABLE_VAL             BIT(6)
 #define DAS1600_STATUS_REG             0x407
-#define DAS1600_STATUS_BME             (1 << 6)
-#define DAS1600_STATUS_ME              (1 << 5)
-#define DAS1600_STATUS_CD              (1 << 4)
-#define DAS1600_STATUS_WS              (1 << 1)
-#define DAS1600_STATUS_CLK_10MHZ       (1 << 0)
+#define DAS1600_STATUS_BME             BIT(6)
+#define DAS1600_STATUS_ME              BIT(5)
+#define DAS1600_STATUS_CD              BIT(4)
+#define DAS1600_STATUS_WS              BIT(1)
+#define DAS1600_STATUS_CLK_10MHZ       BIT(0)
 
 static const struct comedi_lrange range_das1x01_bip = {
        4, {
@@ -198,6 +198,7 @@ enum {
        das16_pg_1601,
        das16_pg_1602,
 };
+
 static const int *const das16_gainlists[] = {
        NULL,
        das16jr_gainlist,
@@ -428,8 +429,10 @@ static const struct das16_board das16_boards[] = {
        },
 };
 
-/* Period for timer interrupt in jiffies.  It's a function
- * to deal with possibility of dynamic HZ patches  */
+/*
+ * Period for timer interrupt in jiffies.  It's a function
+ * to deal with possibility of dynamic HZ patches
+ */
 static inline int timer_period(void)
 {
        return HZ / 20;
index 3a37373fbb6f15b482bc25c34c1acc787b497b1e..bb8d6ec0632ef7043845417d975b273a2bd5c227 100644 (file)
@@ -1,56 +1,52 @@
 /*
-    comedi/drivers/das16m1.c
-    CIO-DAS16/M1 driver
-    Author: Frank Mori Hess, based on code from the das16
-      driver.
-    Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net>
-
-    COMEDI - Linux Control and Measurement Device Interface
-    Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-*/
+ * Comedi driver for CIO-DAS16/M1
+ * Author: Frank Mori Hess, based on code from the das16 driver.
+ * Copyright (C) 2001 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
 /*
-Driver: das16m1
-Description: CIO-DAS16/M1
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Measurement Computing] CIO-DAS16/M1 (das16m1)
-Status: works
-
-This driver supports a single board - the CIO-DAS16/M1.
-As far as I know, there are no other boards that have
-the same register layout.  Even the CIO-DAS16/M1/16 is
-significantly different.
-
-I was _barely_ able to reach the full 1 MHz capability
-of this board, using a hard real-time interrupt
-(set the TRIG_RT flag in your struct comedi_cmd and use
-rtlinux or RTAI).  The board can't do dma, so the bottleneck is
-pulling the data across the ISA bus.  I timed the interrupt
-handler, and it took my computer ~470 microseconds to pull 512
-samples from the board.  So at 1 Mhz sampling rate,
-expect your CPU to be spending almost all of its
-time in the interrupt handler.
-
-This board has some unusual restrictions for its channel/gain list.  If the
-list has 2 or more channels in it, then two conditions must be satisfied:
-(1) - even/odd channels must appear at even/odd indices in the list
-(2) - the list must have an even number of entries.
-
-Options:
-       [0] - base io address
-       [1] - irq (optional, but you probably want it)
-
-irq can be omitted, although the cmd interface will not work without it.
-*/
+ * Driver: das16m1
+ * Description: CIO-DAS16/M1
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [Measurement Computing] CIO-DAS16/M1 (das16m1)
+ * Status: works
+ *
+ * This driver supports a single board - the CIO-DAS16/M1. As far as I know,
+ * there are no other boards that have the same register layout. Even the
+ * CIO-DAS16/M1/16 is significantly different.
+ *
+ * I was _barely_ able to reach the full 1 MHz capability of this board, using
+ * a hard real-time interrupt (set the TRIG_RT flag in your struct comedi_cmd
+ * and use rtlinux or RTAI). The board can't do dma, so the bottleneck is
+ * pulling the data across the ISA bus. I timed the interrupt handler, and it
+ * took my computer ~470 microseconds to pull 512 samples from the board. So
+ * at 1 Mhz sampling rate, expect your CPU to be spending almost all of its
+ * time in the interrupt handler.
+ *
+ * This board has some unusual restrictions for its channel/gain list.  If the
+ * list has 2 or more channels in it, then two conditions must be satisfied:
+ * (1) - even/odd channels must appear at even/odd indices in the list
+ * (2) - the list must have an even number of entries.
+ *
+ * Configuration options:
+ *   [0] - base io address
+ *   [1] - irq (optional, but you probably want it)
+ *
+ * irq can be omitted, although the cmd interface will not work without it.
+ */
 
 #include <linux/module.h>
 #include <linux/slab.h>
@@ -60,52 +56,38 @@ irq can be omitted, although the cmd interface will not work without it.
 #include "8255.h"
 #include "comedi_8254.h"
 
-#define DAS16M1_SIZE2 8
-
-#define FIFO_SIZE 1024         /*  1024 sample fifo */
-
 /*
-    CIO-DAS16_M1.pdf
-
-    "cio-das16/m1"
-
-  0            a/d bits 0-3, mux               start 12 bit
-  1            a/d bits 4-11           unused
-  2            status          control
-  3            di 4 bit                do 4 bit
-  4            unused                  clear interrupt
-  5            interrupt, pacer
-  6            channel/gain queue address
-  7            channel/gain queue data
-  89ab         8254
-  cdef         8254
-  400          8255
-  404-407      8254
-
-*/
-
-#define DAS16M1_AI             0       /*  16-bit wide register */
-#define   AI_CHAN(x)             ((x) & 0xf)
-#define DAS16M1_CS             2
-#define   EXT_TRIG_BIT           0x1
-#define   OVRUN                  0x20
-#define   IRQDATA                0x80
-#define DAS16M1_DIO            3
-#define DAS16M1_CLEAR_INTR     4
-#define DAS16M1_INTR_CONTROL   5
-#define   EXT_PACER              0x2
-#define   INT_PACER              0x3
-#define   PACER_MASK             0x3
-#define   INTE                   0x80
-#define DAS16M1_QUEUE_ADDR     6
-#define DAS16M1_QUEUE_DATA     7
-#define   Q_CHAN(x)              ((x) & 0x7)
-#define   Q_RANGE(x)             (((x) & 0xf) << 4)
-#define   UNIPOLAR               0x40
-#define DAS16M1_8254_FIRST             0x8
-#define DAS16M1_8254_SECOND            0xc
-#define DAS16M1_82C55                  0x400
-#define DAS16M1_8254_THIRD             0x404
+ * Register map (dev->iobase)
+ */
+#define DAS16M1_AI_REG                 0x00    /* 16-bit register */
+#define DAS16M1_AI_TO_CHAN(x)          (((x) >> 0) & 0xf)
+#define DAS16M1_AI_TO_SAMPLE(x)                (((x) >> 4) & 0xfff)
+#define DAS16M1_CS_REG                 0x02
+#define DAS16M1_CS_EXT_TRIG            BIT(0)
+#define DAS16M1_CS_OVRUN               BIT(5)
+#define DAS16M1_CS_IRQDATA             BIT(7)
+#define DAS16M1_DI_REG                 0x03
+#define DAS16M1_DO_REG                 0x03
+#define DAS16M1_CLR_INTR_REG           0x04
+#define DAS16M1_INTR_CTRL_REG          0x05
+#define DAS16M1_INTR_CTRL_PACER(x)     (((x) & 0x3) << 0)
+#define DAS16M1_INTR_CTRL_PACER_EXT    DAS16M1_INTR_CTRL_PACER(2)
+#define DAS16M1_INTR_CTRL_PACER_INT    DAS16M1_INTR_CTRL_PACER(3)
+#define DAS16M1_INTR_CTRL_PACER_MASK   DAS16M1_INTR_CTRL_PACER(3)
+#define DAS16M1_INTR_CTRL_IRQ(x)       (((x) & 0x7) << 4)
+#define DAS16M1_INTR_CTRL_INTE         BIT(7)
+#define DAS16M1_Q_ADDR_REG             0x06
+#define DAS16M1_Q_REG                  0x07
+#define DAS16M1_Q_CHAN(x)              (((x) & 0x7) << 0)
+#define DAS16M1_Q_RANGE(x)             (((x) & 0xf) << 4)
+#define DAS16M1_8254_IOBASE1           0x08
+#define DAS16M1_8254_IOBASE2           0x0c
+#define DAS16M1_8255_IOBASE            0x400
+#define DAS16M1_8254_IOBASE3           0x404
+
+#define DAS16M1_SIZE2                  0x08
+
+#define DAS16M1_AI_FIFO_SZ             1024    /* # samples */
 
 static const struct comedi_lrange range_das16m1 = {
        9, {
@@ -121,29 +103,46 @@ static const struct comedi_lrange range_das16m1 = {
        }
 };
 
-struct das16m1_private_struct {
+struct das16m1_private {
        struct comedi_8254 *counter;
-       unsigned int control_state;
-       unsigned int adc_count; /*  number of samples completed */
-       /* initial value in lower half of hardware conversion counter,
-        * needed to keep track of whether new count has been loaded into
-        * counter yet (loaded by first sample conversion) */
+       unsigned int intr_ctrl;
+       unsigned int adc_count;
        u16 initial_hw_count;
-       unsigned short ai_buffer[FIFO_SIZE];
+       unsigned short ai_buffer[DAS16M1_AI_FIFO_SZ];
        unsigned long extra_iobase;
 };
 
-static inline unsigned short munge_sample(unsigned short data)
+static void das16m1_ai_set_queue(struct comedi_device *dev,
+                                unsigned int *chanspec, unsigned int len)
 {
-       return (data >> 4) & 0xfff;
+       unsigned int i;
+
+       for (i = 0; i < len; i++) {
+               unsigned int chan = CR_CHAN(chanspec[i]);
+               unsigned int range = CR_RANGE(chanspec[i]);
+
+               outb(i, dev->iobase + DAS16M1_Q_ADDR_REG);
+               outb(DAS16M1_Q_CHAN(chan) | DAS16M1_Q_RANGE(range),
+                    dev->iobase + DAS16M1_Q_REG);
+       }
 }
 
-static void munge_sample_array(unsigned short *array, unsigned int num_elements)
+static void das16m1_ai_munge(struct comedi_device *dev,
+                            struct comedi_subdevice *s,
+                            void *data, unsigned int num_bytes,
+                            unsigned int start_chan_index)
 {
+       unsigned short *array = data;
+       unsigned int nsamples = comedi_bytes_to_samples(s, num_bytes);
        unsigned int i;
 
-       for (i = 0; i < num_elements; i++)
-               array[i] = munge_sample(array[i]);
+       /*
+        * The fifo values have the channel number in the lower 4-bits and
+        * the sample in the upper 12-bits. This just shifts the values
+        * to remove the channel numbers.
+        */
+       for (i = 0; i < nsamples; i++)
+               array[i] = DAS16M1_AI_TO_SAMPLE(array[i]);
 }
 
 static int das16m1_ai_check_chanlist(struct comedi_device *dev,
@@ -174,8 +173,9 @@ static int das16m1_ai_check_chanlist(struct comedi_device *dev,
        return 0;
 }
 
-static int das16m1_cmd_test(struct comedi_device *dev,
-                           struct comedi_subdevice *s, struct comedi_cmd *cmd)
+static int das16m1_ai_cmdtest(struct comedi_device *dev,
+                             struct comedi_subdevice *s,
+                             struct comedi_cmd *cmd)
 {
        int err = 0;
 
@@ -245,17 +245,13 @@ static int das16m1_cmd_test(struct comedi_device *dev,
        return 0;
 }
 
-static int das16m1_cmd_exec(struct comedi_device *dev,
-                           struct comedi_subdevice *s)
+static int das16m1_ai_cmd(struct comedi_device *dev,
+                         struct comedi_subdevice *s)
 {
-       struct das16m1_private_struct *devpriv = dev->private;
+       struct das16m1_private *devpriv = dev->private;
        struct comedi_async *async = s->async;
        struct comedi_cmd *cmd = &async->cmd;
-       unsigned int byte, i;
-
-       /* disable interrupts and internal pacer */
-       devpriv->control_state &= ~INTE & ~PACER_MASK;
-       outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+       unsigned int byte;
 
        /*  set software count */
        devpriv->adc_count = 0;
@@ -274,48 +270,47 @@ static int das16m1_cmd_exec(struct comedi_device *dev,
         */
        devpriv->initial_hw_count = comedi_8254_read(devpriv->counter, 1);
 
-       /* setup channel/gain queue */
-       for (i = 0; i < cmd->chanlist_len; i++) {
-               outb(i, dev->iobase + DAS16M1_QUEUE_ADDR);
-               byte =
-                   Q_CHAN(CR_CHAN(cmd->chanlist[i])) |
-                   Q_RANGE(CR_RANGE(cmd->chanlist[i]));
-               outb(byte, dev->iobase + DAS16M1_QUEUE_DATA);
-       }
+       das16m1_ai_set_queue(dev, cmd->chanlist, cmd->chanlist_len);
 
        /* enable interrupts and set internal pacer counter mode and counts */
-       devpriv->control_state &= ~PACER_MASK;
+       devpriv->intr_ctrl &= ~DAS16M1_INTR_CTRL_PACER_MASK;
        if (cmd->convert_src == TRIG_TIMER) {
                comedi_8254_update_divisors(dev->pacer);
                comedi_8254_pacer_enable(dev->pacer, 1, 2, true);
-               devpriv->control_state |= INT_PACER;
+               devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_PACER_INT;
        } else {        /* TRIG_EXT */
-               devpriv->control_state |= EXT_PACER;
+               devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_PACER_EXT;
        }
 
        /*  set control & status register */
        byte = 0;
-       /* if we are using external start trigger (also board dislikes having
-        * both start and conversion triggers external simultaneously) */
+       /*
+        * If we are using external start trigger (also board dislikes having
+        * both start and conversion triggers external simultaneously).
+        */
        if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT)
-               byte |= EXT_TRIG_BIT;
+               byte |= DAS16M1_CS_EXT_TRIG;
 
-       outb(byte, dev->iobase + DAS16M1_CS);
-       /* clear interrupt bit */
-       outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
+       outb(byte, dev->iobase + DAS16M1_CS_REG);
+
+       /* clear interrupt */
+       outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
 
-       devpriv->control_state |= INTE;
-       outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+       devpriv->intr_ctrl |= DAS16M1_INTR_CTRL_INTE;
+       outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
 
        return 0;
 }
 
-static int das16m1_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
+static int das16m1_ai_cancel(struct comedi_device *dev,
+                            struct comedi_subdevice *s)
 {
-       struct das16m1_private_struct *devpriv = dev->private;
+       struct das16m1_private *devpriv = dev->private;
 
-       devpriv->control_state &= ~INTE & ~PACER_MASK;
-       outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+       /* disable interrupts and pacer */
+       devpriv->intr_ctrl &= ~(DAS16M1_INTR_CTRL_INTE |
+                               DAS16M1_INTR_CTRL_PACER_MASK);
+       outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
 
        return 0;
 }
@@ -327,67 +322,58 @@ static int das16m1_ai_eoc(struct comedi_device *dev,
 {
        unsigned int status;
 
-       status = inb(dev->iobase + DAS16M1_CS);
-       if (status & IRQDATA)
+       status = inb(dev->iobase + DAS16M1_CS_REG);
+       if (status & DAS16M1_CS_IRQDATA)
                return 0;
        return -EBUSY;
 }
 
-static int das16m1_ai_rinsn(struct comedi_device *dev,
-                           struct comedi_subdevice *s,
-                           struct comedi_insn *insn, unsigned int *data)
+static int das16m1_ai_insn_read(struct comedi_device *dev,
+                               struct comedi_subdevice *s,
+                               struct comedi_insn *insn,
+                               unsigned int *data)
 {
-       struct das16m1_private_struct *devpriv = dev->private;
        int ret;
-       int n;
-       int byte;
-
-       /* disable interrupts and internal pacer */
-       devpriv->control_state &= ~INTE & ~PACER_MASK;
-       outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
-
-       /* setup channel/gain queue */
-       outb(0, dev->iobase + DAS16M1_QUEUE_ADDR);
-       byte =
-           Q_CHAN(CR_CHAN(insn->chanspec)) | Q_RANGE(CR_RANGE(insn->chanspec));
-       outb(byte, dev->iobase + DAS16M1_QUEUE_DATA);
-
-       for (n = 0; n < insn->n; n++) {
-               /* clear IRQDATA bit */
-               outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
+       int i;
+
+       das16m1_ai_set_queue(dev, &insn->chanspec, 1);
+
+       for (i = 0; i < insn->n; i++) {
+               unsigned short val;
+
+               /* clear interrupt */
+               outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
                /* trigger conversion */
-               outb(0, dev->iobase);
+               outb(0, dev->iobase + DAS16M1_AI_REG);
 
                ret = comedi_timeout(dev, s, insn, das16m1_ai_eoc, 0);
                if (ret)
                        return ret;
 
-               data[n] = munge_sample(inw(dev->iobase));
+               val = inw(dev->iobase + DAS16M1_AI_REG);
+               data[i] = DAS16M1_AI_TO_SAMPLE(val);
        }
 
-       return n;
+       return insn->n;
 }
 
-static int das16m1_di_rbits(struct comedi_device *dev,
-                           struct comedi_subdevice *s,
-                           struct comedi_insn *insn, unsigned int *data)
+static int das16m1_di_insn_bits(struct comedi_device *dev,
+                               struct comedi_subdevice *s,
+                               struct comedi_insn *insn,
+                               unsigned int *data)
 {
-       unsigned int bits;
-
-       bits = inb(dev->iobase + DAS16M1_DIO) & 0xf;
-       data[1] = bits;
-       data[0] = 0;
+       data[1] = inb(dev->iobase + DAS16M1_DI_REG) & 0xf;
 
        return insn->n;
 }
 
-static int das16m1_do_wbits(struct comedi_device *dev,
-                           struct comedi_subdevice *s,
-                           struct comedi_insn *insn,
-                           unsigned int *data)
+static int das16m1_do_insn_bits(struct comedi_device *dev,
+                               struct comedi_subdevice *s,
+                               struct comedi_insn *insn,
+                               unsigned int *data)
 {
        if (comedi_dio_update_state(s, data))
-               outb(s->state, dev->iobase + DAS16M1_DIO);
+               outb(s->state, dev->iobase + DAS16M1_DO_REG);
 
        data[1] = s->state;
 
@@ -396,33 +382,33 @@ static int das16m1_do_wbits(struct comedi_device *dev,
 
 static void das16m1_handler(struct comedi_device *dev, unsigned int status)
 {
-       struct das16m1_private_struct *devpriv = dev->private;
-       struct comedi_subdevice *s;
-       struct comedi_async *async;
-       struct comedi_cmd *cmd;
+       struct das16m1_private *devpriv = dev->private;
+       struct comedi_subdevice *s = dev->read_subdev;
+       struct comedi_async *async = s->async;
+       struct comedi_cmd *cmd = &async->cmd;
        u16 num_samples;
        u16 hw_counter;
 
-       s = dev->read_subdev;
-       async = s->async;
-       cmd = &async->cmd;
-
        /* figure out how many samples are in fifo */
        hw_counter = comedi_8254_read(devpriv->counter, 1);
-       /* make sure hardware counter reading is not bogus due to initial value
-        * not having been loaded yet */
+       /*
+        * Make sure hardware counter reading is not bogus due to initial
+        * value not having been loaded yet.
+        */
        if (devpriv->adc_count == 0 &&
            hw_counter == devpriv->initial_hw_count) {
                num_samples = 0;
        } else {
-               /* The calculation of num_samples looks odd, but it uses the
+               /*
+                * The calculation of num_samples looks odd, but it uses the
                 * following facts. 16 bit hardware counter is initialized with
                 * value of zero (which really means 0x1000).  The counter
                 * decrements by one on each conversion (when the counter
                 * decrements from zero it goes to 0xffff).  num_samples is a
                 * 16 bit variable, so it will roll over in a similar fashion
                 * to the hardware counter.  Work it out, and this is what you
-                * get. */
+                * get.
+                */
                num_samples = -hw_counter - devpriv->adc_count;
        }
        /*  check if we only need some of the points */
@@ -431,10 +417,9 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
                        num_samples = cmd->stop_arg * cmd->chanlist_len;
        }
        /*  make sure we dont try to get too many points if fifo has overrun */
-       if (num_samples > FIFO_SIZE)
-               num_samples = FIFO_SIZE;
+       if (num_samples > DAS16M1_AI_FIFO_SZ)
+               num_samples = DAS16M1_AI_FIFO_SZ;
        insw(dev->iobase, devpriv->ai_buffer, num_samples);
-       munge_sample_array(devpriv->ai_buffer, num_samples);
        comedi_buf_write_samples(s, devpriv->ai_buffer, num_samples);
        devpriv->adc_count += num_samples;
 
@@ -445,9 +430,11 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
                }
        }
 
-       /* this probably won't catch overruns since the card doesn't generate
-        * overrun interrupts, but we might as well try */
-       if (status & OVRUN) {
+       /*
+        * This probably won't catch overruns since the card doesn't generate
+        * overrun interrupts, but we might as well try.
+        */
+       if (status & DAS16M1_CS_OVRUN) {
                async->events |= COMEDI_CB_ERROR;
                dev_err(dev->class_dev, "fifo overflow\n");
        }
@@ -455,14 +442,15 @@ static void das16m1_handler(struct comedi_device *dev, unsigned int status)
        comedi_handle_events(dev, s);
 }
 
-static int das16m1_poll(struct comedi_device *dev, struct comedi_subdevice *s)
+static int das16m1_ai_poll(struct comedi_device *dev,
+                          struct comedi_subdevice *s)
 {
        unsigned long flags;
        unsigned int status;
 
        /*  prevent race with interrupt handler */
        spin_lock_irqsave(&dev->spinlock, flags);
-       status = inb(dev->iobase + DAS16M1_CS);
+       status = inb(dev->iobase + DAS16M1_CS_REG);
        das16m1_handler(dev, status);
        spin_unlock_irqrestore(&dev->spinlock, flags);
 
@@ -481,9 +469,9 @@ static irqreturn_t das16m1_interrupt(int irq, void *d)
        /*  prevent race with comedi_poll() */
        spin_lock(&dev->spinlock);
 
-       status = inb(dev->iobase + DAS16M1_CS);
+       status = inb(dev->iobase + DAS16M1_CS_REG);
 
-       if ((status & (IRQDATA | OVRUN)) == 0) {
+       if ((status & (DAS16M1_CS_IRQDATA | DAS16M1_CS_OVRUN)) == 0) {
                dev_err(dev->class_dev, "spurious interrupt\n");
                spin_unlock(&dev->spinlock);
                return IRQ_NONE;
@@ -492,7 +480,7 @@ static irqreturn_t das16m1_interrupt(int irq, void *d)
        das16m1_handler(dev, status);
 
        /* clear interrupt */
-       outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
+       outb(0, dev->iobase + DAS16M1_CLR_INTR_REG);
 
        spin_unlock(&dev->spinlock);
        return IRQ_HANDLED;
@@ -522,15 +510,10 @@ static int das16m1_irq_bits(unsigned int irq)
        }
 }
 
-/*
- * Options list:
- *   0  I/O base
- *   1  IRQ
- */
 static int das16m1_attach(struct comedi_device *dev,
                          struct comedi_devconfig *it)
 {
-       struct das16m1_private_struct *devpriv;
+       struct das16m1_private *devpriv;
        struct comedi_subdevice *s;
        int ret;
 
@@ -541,12 +524,12 @@ static int das16m1_attach(struct comedi_device *dev,
        ret = comedi_request_region(dev, it->options[0], 0x10);
        if (ret)
                return ret;
-       /* Request an additional region for the 8255 */
-       ret = __comedi_request_region(dev, dev->iobase + DAS16M1_82C55,
+       /* Request an additional region for the 8255 and 3rd 8254 */
+       ret = __comedi_request_region(dev, dev->iobase + DAS16M1_8255_IOBASE,
                                      DAS16M1_SIZE2);
        if (ret)
                return ret;
-       devpriv->extra_iobase = dev->iobase + DAS16M1_82C55;
+       devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE;
 
        /* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */
        if ((1 << it->options[1]) & 0xdcfc) {
@@ -556,12 +539,12 @@ static int das16m1_attach(struct comedi_device *dev,
                        dev->irq = it->options[1];
        }
 
-       dev->pacer = comedi_8254_init(dev->iobase + DAS16M1_8254_SECOND,
+       dev->pacer = comedi_8254_init(dev->iobase + DAS16M1_8254_IOBASE2,
                                      I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
        if (!dev->pacer)
                return -ENOMEM;
 
-       devpriv->counter = comedi_8254_init(dev->iobase + DAS16M1_8254_FIRST,
+       devpriv->counter = comedi_8254_init(dev->iobase + DAS16M1_8254_IOBASE1,
                                            0, I8254_IO8, 0);
        if (!devpriv->counter)
                return -ENOMEM;
@@ -570,61 +553,62 @@ static int das16m1_attach(struct comedi_device *dev,
        if (ret)
                return ret;
 
+       /* Analog Input subdevice */
        s = &dev->subdevices[0];
-       /* ai */
-       s->type = COMEDI_SUBD_AI;
-       s->subdev_flags = SDF_READABLE | SDF_DIFF;
-       s->n_chan = 8;
-       s->maxdata = (1 << 12) - 1;
-       s->range_table = &range_das16m1;
-       s->insn_read = das16m1_ai_rinsn;
+       s->type         = COMEDI_SUBD_AI;
+       s->subdev_flags = SDF_READABLE | SDF_DIFF;
+       s->n_chan       = 8;
+       s->maxdata      = 0x0fff;
+       s->range_table  = &range_das16m1;
+       s->insn_read    = das16m1_ai_insn_read;
        if (dev->irq) {
                dev->read_subdev = s;
-               s->subdev_flags |= SDF_CMD_READ;
-               s->len_chanlist = 256;
-               s->do_cmdtest = das16m1_cmd_test;
-               s->do_cmd = das16m1_cmd_exec;
-               s->cancel = das16m1_cancel;
-               s->poll = das16m1_poll;
+               s->subdev_flags |= SDF_CMD_READ;
+               s->len_chanlist = 256;
+               s->do_cmdtest   = das16m1_ai_cmdtest;
+               s->do_cmd       = das16m1_ai_cmd;
+               s->cancel       = das16m1_ai_cancel;
+               s->poll         = das16m1_ai_poll;
+               s->munge        = das16m1_ai_munge;
        }
 
+       /* Digital Input subdevice */
        s = &dev->subdevices[1];
-       /* di */
-       s->type = COMEDI_SUBD_DI;
-       s->subdev_flags = SDF_READABLE;
-       s->n_chan = 4;
-       s->maxdata = 1;
-       s->range_table = &range_digital;
-       s->insn_bits = das16m1_di_rbits;
-
+       s->type         = COMEDI_SUBD_DI;
+       s->subdev_flags = SDF_READABLE;
+       s->n_chan       = 4;
+       s->maxdata      = 1;
+       s->range_table  = &range_digital;
+       s->insn_bits    = das16m1_di_insn_bits;
+
+       /* Digital Output subdevice */
        s = &dev->subdevices[2];
-       /* do */
-       s->type = COMEDI_SUBD_DO;
-       s->subdev_flags = SDF_WRITABLE;
-       s->n_chan = 4;
-       s->maxdata = 1;
-       s->range_table = &range_digital;
-       s->insn_bits = das16m1_do_wbits;
-
+       s->type         = COMEDI_SUBD_DO;
+       s->subdev_flags = SDF_WRITABLE;
+       s->n_chan       = 4;
+       s->maxdata      = 1;
+       s->range_table  = &range_digital;
+       s->insn_bits    = das16m1_do_insn_bits;
+
+       /* Digital I/O subdevice (8255) */
        s = &dev->subdevices[3];
-       /* 8255 */
-       ret = subdev_8255_init(dev, s, NULL, DAS16M1_82C55);
+       ret = subdev_8255_init(dev, s, NULL, DAS16M1_8255_IOBASE);
        if (ret)
                return ret;
 
        /*  initialize digital output lines */
-       outb(0, dev->iobase + DAS16M1_DIO);
+       outb(0, dev->iobase + DAS16M1_DO_REG);
 
        /* set the interrupt level */
-       devpriv->control_state = das16m1_irq_bits(dev->irq) << 4;
-       outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
+       devpriv->intr_ctrl = DAS16M1_INTR_CTRL_IRQ(das16m1_irq_bits(dev->irq));
+       outb(devpriv->intr_ctrl, dev->iobase + DAS16M1_INTR_CTRL_REG);
 
        return 0;
 }
 
 static void das16m1_detach(struct comedi_device *dev)
 {
-       struct das16m1_private_struct *devpriv = dev->private;
+       struct das16m1_private *devpriv = dev->private;
 
        if (devpriv) {
                if (devpriv->extra_iobase)
@@ -643,5 +627,5 @@ static struct comedi_driver das16m1_driver = {
 module_comedi_driver(das16m1_driver);
 
 MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for CIO-DAS16/M1 ISA cards");
 MODULE_LICENSE("GPL");
index 1701294b79cdd59eee72612de6ec278c950b0a05..0fdf5e02182fb3459f3eea3bbbce50ff4c1419c3 100644 (file)
 #define DAS6402_AO_LSB_REG(x)          (0x04 + ((x) * 2))
 #define DAS6402_AO_MSB_REG(x)          (0x05 + ((x) * 2))
 #define DAS6402_STATUS_REG             0x08
-#define DAS6402_STATUS_FFNE            (1 << 0)
-#define DAS6402_STATUS_FHALF           (1 << 1)
-#define DAS6402_STATUS_FFULL           (1 << 2)
-#define DAS6402_STATUS_XINT            (1 << 3)
-#define DAS6402_STATUS_INT             (1 << 4)
-#define DAS6402_STATUS_XTRIG           (1 << 5)
-#define DAS6402_STATUS_INDGT           (1 << 6)
-#define DAS6402_STATUS_10MHZ           (1 << 7)
-#define DAS6402_STATUS_W_CLRINT                (1 << 0)
-#define DAS6402_STATUS_W_CLRXTR                (1 << 1)
-#define DAS6402_STATUS_W_CLRXIN                (1 << 2)
-#define DAS6402_STATUS_W_EXTEND                (1 << 4)
-#define DAS6402_STATUS_W_ARMED         (1 << 5)
-#define DAS6402_STATUS_W_POSTMODE      (1 << 6)
-#define DAS6402_STATUS_W_10MHZ         (1 << 7)
+#define DAS6402_STATUS_FFNE            BIT(0)
+#define DAS6402_STATUS_FHALF           BIT(1)
+#define DAS6402_STATUS_FFULL           BIT(2)
+#define DAS6402_STATUS_XINT            BIT(3)
+#define DAS6402_STATUS_INT             BIT(4)
+#define DAS6402_STATUS_XTRIG           BIT(5)
+#define DAS6402_STATUS_INDGT           BIT(6)
+#define DAS6402_STATUS_10MHZ           BIT(7)
+#define DAS6402_STATUS_W_CLRINT                BIT(0)
+#define DAS6402_STATUS_W_CLRXTR                BIT(1)
+#define DAS6402_STATUS_W_CLRXIN                BIT(2)
+#define DAS6402_STATUS_W_EXTEND                BIT(4)
+#define DAS6402_STATUS_W_ARMED         BIT(5)
+#define DAS6402_STATUS_W_POSTMODE      BIT(6)
+#define DAS6402_STATUS_W_10MHZ         BIT(7)
 #define DAS6402_CTRL_REG               0x09
-#define DAS6402_CTRL_SOFT_TRIG         (0 << 0)
-#define DAS6402_CTRL_EXT_FALL_TRIG     (1 << 0)
-#define DAS6402_CTRL_EXT_RISE_TRIG     (2 << 0)
-#define DAS6402_CTRL_PACER_TRIG                (3 << 0)
-#define DAS6402_CTRL_BURSTEN           (1 << 2)
-#define DAS6402_CTRL_XINTE             (1 << 3)
+#define DAS6402_CTRL_TRIG(x)           ((x) << 0)
+#define DAS6402_CTRL_SOFT_TRIG         DAS6402_CTRL_TRIG(0)
+#define DAS6402_CTRL_EXT_FALL_TRIG     DAS6402_CTRL_TRIG(1)
+#define DAS6402_CTRL_EXT_RISE_TRIG     DAS6402_CTRL_TRIG(2)
+#define DAS6402_CTRL_PACER_TRIG                DAS6402_CTRL_TRIG(3)
+#define DAS6402_CTRL_BURSTEN           BIT(2)
+#define DAS6402_CTRL_XINTE             BIT(3)
 #define DAS6402_CTRL_IRQ(x)            ((x) << 4)
-#define DAS6402_CTRL_INTE              (1 << 7)
+#define DAS6402_CTRL_INTE              BIT(7)
 #define DAS6402_TRIG_REG               0x0a
-#define DAS6402_TRIG_TGEN              (1 << 0)
-#define DAS6402_TRIG_TGSEL             (1 << 1)
-#define DAS6402_TRIG_TGPOL             (1 << 2)
-#define DAS6402_TRIG_PRETRIG           (1 << 3)
+#define DAS6402_TRIG_TGEN              BIT(0)
+#define DAS6402_TRIG_TGSEL             BIT(1)
+#define DAS6402_TRIG_TGPOL             BIT(2)
+#define DAS6402_TRIG_PRETRIG           BIT(3)
 #define DAS6402_AO_RANGE(_chan, _range)        ((_range) << ((_chan) ? 6 : 4))
 #define DAS6402_AO_RANGE_MASK(_chan)   (3 << ((_chan) ? 6 : 4))
 #define DAS6402_MODE_REG               0x0b
-#define DAS6402_MODE_RANGE(x)          ((x) << 0)
-#define DAS6402_MODE_POLLED            (0 << 2)
-#define DAS6402_MODE_FIFONEPTY         (1 << 2)
-#define DAS6402_MODE_FIFOHFULL         (2 << 2)
-#define DAS6402_MODE_EOB               (3 << 2)
-#define DAS6402_MODE_ENHANCED          (1 << 4)
-#define DAS6402_MODE_SE                        (1 << 5)
-#define DAS6402_MODE_UNI               (1 << 6)
-#define DAS6402_MODE_DMA1              (0 << 7)
-#define DAS6402_MODE_DMA3              (1 << 7)
+#define DAS6402_MODE_RANGE(x)          ((x) << 2)
+#define DAS6402_MODE_POLLED            DAS6402_MODE_RANGE(0)
+#define DAS6402_MODE_FIFONEPTY         DAS6402_MODE_RANGE(1)
+#define DAS6402_MODE_FIFOHFULL         DAS6402_MODE_RANGE(2)
+#define DAS6402_MODE_EOB               DAS6402_MODE_RANGE(3)
+#define DAS6402_MODE_ENHANCED          BIT(4)
+#define DAS6402_MODE_SE                        BIT(5)
+#define DAS6402_MODE_UNI               BIT(6)
+#define DAS6402_MODE_DMA(x)            ((x) << 7)
+#define DAS6402_MODE_DMA1              DAS6402_MODE_DMA(0)
+#define DAS6402_MODE_DMA3              DAS6402_MODE_DMA(1)
 #define DAS6402_TIMER_BASE             0x0c
 
 static const struct comedi_lrange das6402_ai_ranges = {
index b02f12201cf78583cef5ef060ffeb16017f2e948..fd4cb4911671ce1fa145a57473d3b0ba90bea34d 100644 (file)
@@ -1,56 +1,56 @@
 /*
   comedi/drivers/das800.c
  Driver for Keithley das800 series boards and compatibles
   Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
-
   COMEDI - Linux Control and Measurement Device Interface
   Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
-
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-*/
* comedi/drivers/das800.c
* Driver for Keithley das800 series boards and compatibles
* Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
+ */
 /*
-Driver: das800
-Description: Keithley Metrabyte DAS800 (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Keithley Metrabyte] DAS-800 (das-800), DAS-801 (das-801),
-  DAS-802 (das-802),
-  [Measurement Computing] CIO-DAS800 (cio-das800),
-  CIO-DAS801 (cio-das801), CIO-DAS802 (cio-das802),
-  CIO-DAS802/16 (cio-das802/16)
-Status: works, cio-das802/16 untested - email me if you have tested it
-
-Configuration options:
-  [0] - I/O port base address
-  [1] - IRQ (optional, required for timed or externally triggered conversions)
-
-Notes:
      IRQ can be omitted, although the cmd interface will not work without it.
-
      All entries in the channel/gain list must use the same gain and be
      consecutive channels counting upwards in channel number (these are
      hardware limitations.)
-
      I've never tested the gain setting stuff since I only have a
      DAS-800 board with fixed gain.
-
      The cio-das802/16 does not have a fifo-empty status bit!  Therefore
      only fifo-half-full transfers are possible with this card.
-
-cmd triggers supported:
      start_src:      TRIG_NOW | TRIG_EXT
      scan_begin_src: TRIG_FOLLOW
      scan_end_src:   TRIG_COUNT
      convert_src:    TRIG_TIMER | TRIG_EXT
      stop_src:       TRIG_NONE | TRIG_COUNT
-*/
+ * Driver: das800
+ * Description: Keithley Metrabyte DAS800 (& compatibles)
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [Keithley Metrabyte] DAS-800 (das-800), DAS-801 (das-801),
* DAS-802 (das-802),
* [Measurement Computing] CIO-DAS800 (cio-das800),
* CIO-DAS801 (cio-das801), CIO-DAS802 (cio-das802),
* CIO-DAS802/16 (cio-das802/16)
+ * Status: works, cio-das802/16 untested - email me if you have tested it
+ *
+ * Configuration options:
* [0] - I/O port base address
* [1] - IRQ (optional, required for timed or externally triggered conversions)
+ *
+ * Notes:
*     IRQ can be omitted, although the cmd interface will not work without it.
+ *
*     All entries in the channel/gain list must use the same gain and be
*     consecutive channels counting upwards in channel number (these are
*     hardware limitations.)
+ *
*     I've never tested the gain setting stuff since I only have a
*     DAS-800 board with fixed gain.
+ *
*     The cio-das802/16 does not have a fifo-empty status bit!  Therefore
*     only fifo-half-full transfers are possible with this card.
+ *
+ * cmd triggers supported:
*     start_src:      TRIG_NOW | TRIG_EXT
*     scan_begin_src: TRIG_FOLLOW
*     scan_end_src:   TRIG_COUNT
*     convert_src:    TRIG_TIMER | TRIG_EXT
*     stop_src:       TRIG_NONE | TRIG_COUNT
+ */
 
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -218,7 +218,7 @@ struct das800_private {
 };
 
 static void das800_ind_write(struct comedi_device *dev,
-                            unsigned val, unsigned reg)
+                            unsigned int val, unsigned int reg)
 {
        /*
         * Select dev->iobase + 2 to be desired register
@@ -228,7 +228,7 @@ static void das800_ind_write(struct comedi_device *dev,
        outb(val, dev->iobase + 2);
 }
 
-static unsigned das800_ind_read(struct comedi_device *dev, unsigned reg)
+static unsigned int das800_ind_read(struct comedi_device *dev, unsigned int reg)
 {
        /*
         * Select dev->iobase + 7 to be desired register
index 958c0d4aae5c0acabbbea3f6623dd9e173a45e0b..b8606ded06232ddff5f1ec7a024a1090f90c198b 100644 (file)
 #define DMM32AT_AI_START_CONV_REG      0x00
 #define DMM32AT_AI_LSB_REG             0x00
 #define DMM32AT_AUX_DOUT_REG           0x01
-#define DMM32AT_AUX_DOUT2              (1 << 2)  /* J3.42 - OUT2 (OUT2EN) */
-#define DMM32AT_AUX_DOUT1              (1 << 1)  /* J3.43 */
-#define DMM32AT_AUX_DOUT0              (1 << 0)  /* J3.44 - OUT0 (OUT0EN) */
+#define DMM32AT_AUX_DOUT2              BIT(2)  /* J3.42 - OUT2 (OUT2EN) */
+#define DMM32AT_AUX_DOUT1              BIT(1)  /* J3.43 */
+#define DMM32AT_AUX_DOUT0              BIT(0)  /* J3.44 - OUT0 (OUT0EN) */
 #define DMM32AT_AI_MSB_REG             0x01
 #define DMM32AT_AI_LO_CHAN_REG         0x02
 #define DMM32AT_AI_HI_CHAN_REG         0x03
 #define DMM32AT_AUX_DI_REG             0x04
-#define DMM32AT_AUX_DI_DACBUSY         (1 << 7)
-#define DMM32AT_AUX_DI_CALBUSY         (1 << 6)
-#define DMM32AT_AUX_DI3                        (1 << 3)  /* J3.45 - ADCLK (CLKSEL) */
-#define DMM32AT_AUX_DI2                        (1 << 2)  /* J3.46 - GATE12 (GT12EN) */
-#define DMM32AT_AUX_DI1                        (1 << 1)  /* J3.47 - GATE0 (GT0EN) */
-#define DMM32AT_AUX_DI0                        (1 << 0)  /* J3.48 - CLK0 (SRC0) */
+#define DMM32AT_AUX_DI_DACBUSY         BIT(7)
+#define DMM32AT_AUX_DI_CALBUSY         BIT(6)
+#define DMM32AT_AUX_DI3                        BIT(3)  /* J3.45 - ADCLK (CLKSEL) */
+#define DMM32AT_AUX_DI2                        BIT(2)  /* J3.46 - GATE12 (GT12EN) */
+#define DMM32AT_AUX_DI1                        BIT(1)  /* J3.47 - GATE0 (GT0EN) */
+#define DMM32AT_AUX_DI0                        BIT(0)  /* J3.48 - CLK0 (SRC0) */
 #define DMM32AT_AO_LSB_REG             0x04
 #define DMM32AT_AO_MSB_REG             0x05
 #define DMM32AT_AO_MSB_DACH(x)         ((x) << 6)
 #define DMM32AT_FIFO_DEPTH_REG         0x06
 #define DMM32AT_FIFO_CTRL_REG          0x07
-#define DMM32AT_FIFO_CTRL_FIFOEN       (1 << 3)
-#define DMM32AT_FIFO_CTRL_SCANEN       (1 << 2)
-#define DMM32AT_FIFO_CTRL_FIFORST      (1 << 1)
+#define DMM32AT_FIFO_CTRL_FIFOEN       BIT(3)
+#define DMM32AT_FIFO_CTRL_SCANEN       BIT(2)
+#define DMM32AT_FIFO_CTRL_FIFORST      BIT(1)
 #define DMM32AT_FIFO_STATUS_REG                0x07
-#define DMM32AT_FIFO_STATUS_EF         (1 << 7)
-#define DMM32AT_FIFO_STATUS_HF         (1 << 6)
-#define DMM32AT_FIFO_STATUS_FF         (1 << 5)
-#define DMM32AT_FIFO_STATUS_OVF                (1 << 4)
-#define DMM32AT_FIFO_STATUS_FIFOEN     (1 << 3)
-#define DMM32AT_FIFO_STATUS_SCANEN     (1 << 2)
+#define DMM32AT_FIFO_STATUS_EF         BIT(7)
+#define DMM32AT_FIFO_STATUS_HF         BIT(6)
+#define DMM32AT_FIFO_STATUS_FF         BIT(5)
+#define DMM32AT_FIFO_STATUS_OVF                BIT(4)
+#define DMM32AT_FIFO_STATUS_FIFOEN     BIT(3)
+#define DMM32AT_FIFO_STATUS_SCANEN     BIT(2)
 #define DMM32AT_FIFO_STATUS_PAGE_MASK  (3 << 0)
 #define DMM32AT_CTRL_REG               0x08
-#define DMM32AT_CTRL_RESETA            (1 << 5)
-#define DMM32AT_CTRL_RESETD            (1 << 4)
-#define DMM32AT_CTRL_INTRST            (1 << 3)
-#define DMM32AT_CTRL_PAGE_8254         (0 << 0)
-#define DMM32AT_CTRL_PAGE_8255         (1 << 0)
-#define DMM32AT_CTRL_PAGE_CALIB                (3 << 0)
+#define DMM32AT_CTRL_RESETA            BIT(5)
+#define DMM32AT_CTRL_RESETD            BIT(4)
+#define DMM32AT_CTRL_INTRST            BIT(3)
+#define DMM32AT_CTRL_PAGE(x)           ((x) << 0)
+#define DMM32AT_CTRL_PAGE_8254         DMM32AT_CTRL_PAGE(0)
+#define DMM32AT_CTRL_PAGE_8255         DMM32AT_CTRL_PAGE(1)
+#define DMM32AT_CTRL_PAGE_CALIB                DMM32AT_CTRL_PAGE(3)
 #define DMM32AT_AI_STATUS_REG          0x08
-#define DMM32AT_AI_STATUS_STS          (1 << 7)
-#define DMM32AT_AI_STATUS_SD1          (1 << 6)
-#define DMM32AT_AI_STATUS_SD0          (1 << 5)
+#define DMM32AT_AI_STATUS_STS          BIT(7)
+#define DMM32AT_AI_STATUS_SD1          BIT(6)
+#define DMM32AT_AI_STATUS_SD0          BIT(5)
 #define DMM32AT_AI_STATUS_ADCH_MASK    (0x1f << 0)
 #define DMM32AT_INTCLK_REG             0x09
-#define DMM32AT_INTCLK_ADINT           (1 << 7)
-#define DMM32AT_INTCLK_DINT            (1 << 6)
-#define DMM32AT_INTCLK_TINT            (1 << 5)
-#define DMM32AT_INTCLK_CLKEN           (1 << 1)  /* 1=see below  0=software */
-#define DMM32AT_INTCLK_CLKSEL          (1 << 0)  /* 1=OUT2  0=EXTCLK */
+#define DMM32AT_INTCLK_ADINT           BIT(7)
+#define DMM32AT_INTCLK_DINT            BIT(6)
+#define DMM32AT_INTCLK_TINT            BIT(5)
+#define DMM32AT_INTCLK_CLKEN           BIT(1)  /* 1=see below  0=software */
+#define DMM32AT_INTCLK_CLKSEL          BIT(0)  /* 1=OUT2  0=EXTCLK */
 #define DMM32AT_CTRDIO_CFG_REG         0x0a
-#define DMM32AT_CTRDIO_CFG_FREQ12      (1 << 7)  /* CLK12 1=100KHz 0=10MHz */
-#define DMM32AT_CTRDIO_CFG_FREQ0       (1 << 6)  /* CLK0  1=10KHz  0=10MHz */
-#define DMM32AT_CTRDIO_CFG_OUT2EN      (1 << 5)  /* J3.42 1=OUT2 is DOUT2 */
-#define DMM32AT_CTRDIO_CFG_OUT0EN      (1 << 4)  /* J3,44 1=OUT0 is DOUT0 */
-#define DMM32AT_CTRDIO_CFG_GT0EN       (1 << 2)  /* J3.47 1=DIN1 is GATE0 */
-#define DMM32AT_CTRDIO_CFG_SRC0                (1 << 1)  /* CLK0 is 0=FREQ0 1=J3.48 */
-#define DMM32AT_CTRDIO_CFG_GT12EN      (1 << 0)  /* J3.46 1=DIN2 is GATE12 */
+#define DMM32AT_CTRDIO_CFG_FREQ12      BIT(7)  /* CLK12 1=100KHz 0=10MHz */
+#define DMM32AT_CTRDIO_CFG_FREQ0       BIT(6)  /* CLK0  1=10KHz  0=10MHz */
+#define DMM32AT_CTRDIO_CFG_OUT2EN      BIT(5)  /* J3.42 1=OUT2 is DOUT2 */
+#define DMM32AT_CTRDIO_CFG_OUT0EN      BIT(4)  /* J3.44 1=OUT0 is DOUT0 */
+#define DMM32AT_CTRDIO_CFG_GT0EN       BIT(2)  /* J3.47 1=DIN1 is GATE0 */
+#define DMM32AT_CTRDIO_CFG_SRC0                BIT(1)  /* CLK0 is 0=FREQ0 1=J3.48 */
+#define DMM32AT_CTRDIO_CFG_GT12EN      BIT(0)  /* J3.46 1=DIN2 is GATE12 */
 #define DMM32AT_AI_CFG_REG             0x0b
-#define DMM32AT_AI_CFG_SCINT_20US      (0 << 4)
-#define DMM32AT_AI_CFG_SCINT_15US      (1 << 4)
-#define DMM32AT_AI_CFG_SCINT_10US      (2 << 4)
-#define DMM32AT_AI_CFG_SCINT_5US       (3 << 4)
-#define DMM32AT_AI_CFG_RANGE           (1 << 3)  /* 0=5V  1=10V */
-#define DMM32AT_AI_CFG_ADBU            (1 << 2)  /* 0=bipolar  1=unipolar */
+#define DMM32AT_AI_CFG_SCINT(x)                ((x) << 4)
+#define DMM32AT_AI_CFG_SCINT_20US      DMM32AT_AI_CFG_SCINT(0)
+#define DMM32AT_AI_CFG_SCINT_15US      DMM32AT_AI_CFG_SCINT(1)
+#define DMM32AT_AI_CFG_SCINT_10US      DMM32AT_AI_CFG_SCINT(2)
+#define DMM32AT_AI_CFG_SCINT_5US       DMM32AT_AI_CFG_SCINT(3)
+#define DMM32AT_AI_CFG_RANGE           BIT(3)  /* 0=5V  1=10V */
+#define DMM32AT_AI_CFG_ADBU            BIT(2)  /* 0=bipolar  1=unipolar */
 #define DMM32AT_AI_CFG_GAIN(x)         ((x) << 0)
 #define DMM32AT_AI_READBACK_REG                0x0b
-#define DMM32AT_AI_READBACK_WAIT       (1 << 7)  /* DMM32AT_AI_STATUS_STS */
-#define DMM32AT_AI_READBACK_RANGE      (1 << 3)
-#define DMM32AT_AI_READBACK_ADBU       (1 << 2)
+#define DMM32AT_AI_READBACK_WAIT       BIT(7)  /* DMM32AT_AI_STATUS_STS */
+#define DMM32AT_AI_READBACK_RANGE      BIT(3)
+#define DMM32AT_AI_READBACK_ADBU       BIT(2)
 #define DMM32AT_AI_READBACK_GAIN_MASK  (3 << 0)
 
 #define DMM32AT_CLK1 0x0d
index 6c7b4d27c27cb7fb748dc6a20321b279a9b063a2..c2ce1eb87385528364d4ca597cec0deeb9367db3 100644 (file)
@@ -4,30 +4,30 @@
  *
  */
 /*
-Driver: dt2801
-Description: Data Translation DT2801 series and DT01-EZ
-Author: ds
-Status: works
-Devices: [Data Translation] DT2801 (dt2801), DT2801-A, DT2801/5716A,
-  DT2805, DT2805/5716A, DT2808, DT2818, DT2809, DT01-EZ
-
-This driver can autoprobe the type of board.
-
-Configuration options:
-  [0] - I/O port base address
-  [1] - unused
-  [2] - A/D reference 0=differential, 1=single-ended
-  [3] - A/D range
        0 = [-10, 10]
        1 = [0,10]
-  [4] - D/A 0 range
        0 = [-10, 10]
        1 = [-5,5]
        2 = [-2.5,2.5]
        3 = [0,10]
        4 = [0,5]
-  [5] - D/A 1 range (same choices)
-*/
+ * Driver: dt2801
+ * Description: Data Translation DT2801 series and DT01-EZ
+ * Author: ds
+ * Status: works
+ * Devices: [Data Translation] DT2801 (dt2801), DT2801-A, DT2801/5716A,
* DT2805, DT2805/5716A, DT2808, DT2818, DT2809, DT01-EZ
+ *
+ * This driver can autoprobe the type of board.
+ *
+ * Configuration options:
* [0] - I/O port base address
* [1] - unused
* [2] - A/D reference 0=differential, 1=single-ended
* [3] - A/D range
*       0 = [-10, 10]
*       1 = [0,10]
* [4] - D/A 0 range
*       0 = [-10, 10]
*       1 = [-5,5]
*       2 = [-2.5,2.5]
*       3 = [0,10]
*       4 = [0,5]
* [5] - D/A 1 range (same choices)
+ */
 
 #include <linux/module.h>
 #include "../comedidev.h"
@@ -65,9 +65,10 @@ Configuration options:
 #define DT_C_SET_AD      0xd
 #define DT_C_READ_AD     0xe
 
-/* Command modifiers (only used with read/write), EXTTRIG can be
-   used with some other commands.
-*/
+/*
+ * Command modifiers (only used with read/write), EXTTRIG can be
+ * used with some other commands.
+ */
 #define DT_MOD_DMA     BIT(4)
 #define DT_MOD_CONT    BIT(5)
 #define DT_MOD_EXTCLK  BIT(6)
@@ -135,9 +136,10 @@ struct dt2801_board {
        int dabits;
 };
 
-/* Typeid's for the different boards of the DT2801-series
-   (taken from the test-software, that comes with the board)
-   */
+/*
+ * Typeid's for the different boards of the DT2801-series
+ * (taken from the test-software, that comes with the board)
+ */
 static const struct dt2801_board boardtypes[] = {
        {
         .name = "dt2801",
@@ -209,15 +211,18 @@ struct dt2801_private {
        const struct comedi_lrange *dac_range_types[2];
 };
 
-/* These are the low-level routines:
-   writecommand: write a command to the board
-   writedata: write data byte
-   readdata: read data byte
+/*
+ * These are the low-level routines:
+ * writecommand: write a command to the board
+ * writedata: write data byte
+ * readdata: read data byte
  */
 
-/* Only checks DataOutReady-flag, not the Ready-flag as it is done
-   in the examples of the manual. I don't see why this should be
-   necessary. */
+/*
+ * Only checks DataOutReady-flag, not the Ready-flag as it is done
+ *  in the examples of the manual. I don't see why this should be
+ *  necessary.
+ */
 static int dt2801_readdata(struct comedi_device *dev, int *data)
 {
        int stat = 0;
@@ -517,14 +522,14 @@ static int dt2801_dio_insn_config(struct comedi_device *dev,
 }
 
 /*
  options:
      [0] - i/o base
      [1] - unused
      [2] - a/d 0=differential, 1=single-ended
      [3] - a/d range 0=[-10,10], 1=[0,10]
      [4] - dac0 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
      [5] - dac1 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
-*/
* options:
*     [0] - i/o base
*     [1] - unused
*     [2] - a/d 0=differential, 1=single-ended
*     [3] - a/d range 0=[-10,10], 1=[0,10]
*     [4] - dac0 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
*     [5] - dac1 range 0=[-10,10], 1=[-5,5], 2=[-2.5,2.5] 3=[0,10], 4=[0,5]
+ */
 static int dt2801_attach(struct comedi_device *dev, struct comedi_devconfig *it)
 {
        const struct dt2801_board *board;
index a80773291fdc8bc0e865f42b43ebcc4dfe273aa3..904f637797b6300de12acc98404d9b9d18ca3a43 100644 (file)
 /*
-   comedi/drivers/dt2811.c
-   Hardware driver for Data Translation DT2811
-
-   COMEDI - Linux Control and Measurement Device Interface
-   History:
-   Base Version  - David A. Schleef <ds@schleef.org>
-   December 1998 - Updated to work.  David does not have a DT2811
-   board any longer so this was suffering from bitrot.
-   Updated performed by ...
-
-   This program is free software; you can redistribute it and/or modify
-   it under the terms of the GNU General Public License as published by
-   the Free Software Foundation; either version 2 of the License, or
-   (at your option) any later version.
-
-   This program is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-   GNU General Public License for more details.
+ * Comedi driver for Data Translation DT2811
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
  */
+
 /*
-Driver: dt2811
-Description: Data Translation DT2811
-Author: ds
-Devices: [Data Translation] DT2811-PGL (dt2811-pgl), DT2811-PGH (dt2811-pgh)
-Status: works
-
-Configuration options:
-  [0] - I/O port base address
-  [1] - IRQ, although this is currently unused
-  [2] - A/D reference
-         0 = signle-ended
-         1 = differential
-         2 = pseudo-differential (common reference)
-  [3] - A/D range
-         0 = [-5, 5]
-         1 = [-2.5, 2.5]
-         2 = [0, 5]
-  [4] - D/A 0 range (same choices)
-  [4] - D/A 1 range (same choices)
-*/
+ * Driver: dt2811
+ * Description: Data Translation DT2811
+ * Author: ds
+ * Devices: [Data Translation] DT2811-PGL (dt2811-pgl), DT2811-PGH (dt2811-pgh)
+ * Status: works
+ *
+ * Configuration options:
+ *   [0] - I/O port base address
+ *   [1] - IRQ (optional, needed for async command support)
+ *   [2] - A/D reference (# of analog inputs)
+ *        0 = single-ended (16 channels)
+ *        1 = differential (8 channels)
+ *        2 = pseudo-differential (16 channels)
+ *   [3] - A/D range (deprecated, see below)
+ *   [4] - D/A 0 range (deprecated, see below)
+ *   [5] - D/A 1 range (deprecated, see below)
+ *
+ * Notes:
+ *   - A/D ranges are not programmable but the gain is. The AI subdevice has
+ *     a range_table containing all the possible analog input range/gain
+ *     options for the dt2811-pgh or dt2811-pgl. Use the range that matches
+ *     your board configuration and the desired gain to correctly convert
+ *     between data values and physical units and to set the correct output
+ *     gain.
+ *   - D/A ranges are not programmable. The AO subdevice has a range_table
+ *     containing all the possible analog output ranges. Use the range
+ *     that matches your board configuration to convert between data
+ *     values and physical units.
+ */
 
 #include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
 #include "../comedidev.h"
 
-static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = {
-       4, {
-               UNI_RANGE(5),
-               UNI_RANGE(2.5),
-               UNI_RANGE(1.25),
-               UNI_RANGE(0.625)
-       }
+/*
+ * Register I/O map
+ */
+#define DT2811_ADCSR_REG               0x00    /* r/w  A/D Control/Status */
+#define DT2811_ADCSR_ADDONE            BIT(7)  /* r      1=A/D conv done */
+#define DT2811_ADCSR_ADERROR           BIT(6)  /* r      1=A/D error */
+#define DT2811_ADCSR_ADBUSY            BIT(5)  /* r      1=A/D busy */
+#define DT2811_ADCSR_CLRERROR          BIT(4)
+#define DT2811_ADCSR_DMAENB            BIT(3)  /* r/w    1=dma ena */
+#define DT2811_ADCSR_INTENB            BIT(2)  /* r/w    1=interrupts ena */
+#define DT2811_ADCSR_ADMODE(x)         (((x) & 0x3) << 0)
+
+#define DT2811_ADGCR_REG               0x01    /* r/w  A/D Gain/Channel */
+#define DT2811_ADGCR_GAIN(x)           (((x) & 0x3) << 6)
+#define DT2811_ADGCR_CHAN(x)           (((x) & 0xf) << 0)
+
+#define DT2811_ADDATA_LO_REG           0x02    /* r   A/D Data low byte */
+#define DT2811_ADDATA_HI_REG           0x03    /* r   A/D Data high byte */
+
+#define DT2811_DADATA_LO_REG(x)                (0x02 + ((x) * 2)) /* w D/A Data low */
+#define DT2811_DADATA_HI_REG(x)                (0x03 + ((x) * 2)) /* w D/A Data high */
+
+#define DT2811_DI_REG                  0x06    /* r   Digital Input Port 0 */
+#define DT2811_DO_REG                  0x06    /* w   Digital Output Port 1 */
+
+#define DT2811_TMRCTR_REG              0x07    /* r/w  Timer/Counter */
+#define DT2811_TMRCTR_MANTISSA(x)      (((x) & 0x7) << 3)
+#define DT2811_TMRCTR_EXPONENT(x)      (((x) & 0x7) << 0)
+
+#define DT2811_OSC_BASE                        1666    /* 600 kHz = 1666.6667ns */
+
+/*
+ * Timer frequency control:
+ *   DT2811_TMRCTR_MANTISSA    DT2811_TMRCTR_EXPONENT
+ *   val  divisor  frequency   val  multiply divisor/divide frequency by
+ *    0      1      600 kHz     0   1
+ *    1     10       60 kHz     1   10
+ *    2      2      300 kHz     2   100
+ *    3      3      200 kHz     3   1000
+ *    4      4      150 kHz     4   10000
+ *    5      5      120 kHz     5   100000
+ *    6      6      100 kHz     6   1000000
+ *    7     12       50 kHz     7   10000000
+ */
+const unsigned int dt2811_clk_dividers[] = {
+       1, 10, 2, 3, 4, 5, 6, 12
 };
 
-static const struct comedi_lrange range_dt2811_pgh_ai_2_5_bipolar = {
-       4, {
-               BIP_RANGE(2.5),
-               BIP_RANGE(1.25),
-               BIP_RANGE(0.625),
-               BIP_RANGE(0.3125)
-       }
+const unsigned int dt2811_clk_multipliers[] = {
+       1, 10, 100, 1000, 10000, 100000, 1000000, 10000000
 };
 
-static const struct comedi_lrange range_dt2811_pgh_ai_5_bipolar = {
-       4, {
-               BIP_RANGE(5),
-               BIP_RANGE(2.5),
-               BIP_RANGE(1.25),
-               BIP_RANGE(0.625)
+/*
+ * The Analog Input range is set using jumpers on the board.
+ *
+ * Input Range         W9  W10
+ * -5V to +5V          In  Out
+ * -2.5V to +2.5V      In  In
+ * 0V to +5V           Out In
+ *
+ * The gain may be set to 1, 2, 4, or 8 (on the dt2811-pgh) or to
+ * 1, 10, 100, 500 (on the dt2811-pgl).
+ */
+static const struct comedi_lrange dt2811_pgh_ai_ranges = {
+       12, {
+               BIP_RANGE(5),           /* range 0: gain=1 */
+               BIP_RANGE(2.5),         /* range 1: gain=2 */
+               BIP_RANGE(1.25),        /* range 2: gain=4 */
+               BIP_RANGE(0.625),       /* range 3: gain=8 */
+
+               BIP_RANGE(2.5),         /* range 0+4: gain=1 */
+               BIP_RANGE(1.25),        /* range 1+4: gain=2 */
+               BIP_RANGE(0.625),       /* range 2+4: gain=4 */
+               BIP_RANGE(0.3125),      /* range 3+4: gain=8 */
+
+               UNI_RANGE(5),           /* range 0+8: gain=1 */
+               UNI_RANGE(2.5),         /* range 1+8: gain=2 */
+               UNI_RANGE(1.25),        /* range 2+8: gain=4 */
+               UNI_RANGE(0.625)        /* range 3+8: gain=8 */
        }
 };
 
-static const struct comedi_lrange range_dt2811_pgl_ai_5_unipolar = {
-       4, {
-               UNI_RANGE(5),
-               UNI_RANGE(0.5),
-               UNI_RANGE(0.05),
-               UNI_RANGE(0.01)
+static const struct comedi_lrange dt2811_pgl_ai_ranges = {
+       12, {
+               BIP_RANGE(5),           /* range 0: gain=1 */
+               BIP_RANGE(0.5),         /* range 1: gain=10 */
+               BIP_RANGE(0.05),        /* range 2: gain=100 */
+               BIP_RANGE(0.01),        /* range 3: gain=500 */
+
+               BIP_RANGE(2.5),         /* range 0+4: gain=1 */
+               BIP_RANGE(0.25),        /* range 1+4: gain=10 */
+               BIP_RANGE(0.025),       /* range 2+4: gain=100 */
+               BIP_RANGE(0.005),       /* range 3+4: gain=500 */
+
+               UNI_RANGE(5),           /* range 0+8: gain=1 */
+               UNI_RANGE(0.5),         /* range 1+8: gain=10 */
+               UNI_RANGE(0.05),        /* range 2+8: gain=100 */
+               UNI_RANGE(0.01)         /* range 3+8: gain=500 */
        }
 };
 
-static const struct comedi_lrange range_dt2811_pgl_ai_2_5_bipolar = {
-       4, {
+/*
+ * The Analog Output range is set per-channel using jumpers on the board.
+ *
+ *                     DAC0 Jumpers            DAC1 Jumpers
+ * Output Range                W5  W6  W7  W8          W1  W2  W3  W4
+ * -5V to +5V          In  Out In  Out         In  Out In  Out
+ * -2.5V to +2.5V      In  Out Out In          In  Out Out In
+ * 0 to +5V            Out In  Out In          Out In  Out In
+ */
+static const struct comedi_lrange dt2811_ao_ranges = {
+       3, {
+               BIP_RANGE(5),   /* default setting from factory */
                BIP_RANGE(2.5),
-               BIP_RANGE(0.25),
-               BIP_RANGE(0.025),
-               BIP_RANGE(0.005)
+               UNI_RANGE(5)
        }
 };
 
-static const struct comedi_lrange range_dt2811_pgl_ai_5_bipolar = {
-       4, {
-               BIP_RANGE(5),
-               BIP_RANGE(0.5),
-               BIP_RANGE(0.05),
-               BIP_RANGE(0.01)
-       }
+struct dt2811_board {
+       const char *name;
+       unsigned int is_pgh:1;
 };
 
-/*
+static const struct dt2811_board dt2811_boards[] = {
+       {
+               .name           = "dt2811-pgh",
+               .is_pgh         = 1,
+       }, {
+               .name           = "dt2811-pgl",
+       },
+};
 
-   0x00    ADCSR R/W  A/D Control/Status Register
-   bit 7 - (R) 1 indicates A/D conversion done
-   reading ADDAT clears bit
-   (W) ignored
-   bit 6 - (R) 1 indicates A/D error
-   (W) ignored
-   bit 5 - (R) 1 indicates A/D busy, cleared at end
-   of conversion
-   (W) ignored
-   bit 4 - (R) 0
-   (W)
-   bit 3 - (R) 0
-   bit 2 - (R/W) 1 indicates interrupts enabled
-   bits 1,0 - (R/W) mode bits
-   00  single conversion on ADGCR load
-   01  continuous conversion, internal clock,
-   (clock enabled on ADGCR load)
-   10  continuous conversion, internal clock,
-   external trigger
-   11  continuous conversion, external clock,
-   external trigger
-
-   0x01    ADGCR R/W A/D Gain/Channel Register
-   bit 6,7 - (R/W) gain select
-   00  gain=1, both PGH, PGL models
-   01  gain=2 PGH, 10 PGL
-   10  gain=4 PGH, 100 PGL
-   11  gain=8 PGH, 500 PGL
-   bit 4,5 - reserved
-   bit 3-0 - (R/W) channel select
-   channel number from 0-15
-
-   0x02,0x03 (R) ADDAT A/D Data Register
-   (W) DADAT0 D/A Data Register 0
-   0x02 low byte
-   0x03 high byte
-
-   0x04,0x05 (W) DADAT0 D/A Data Register 1
-
-   0x06 (R) DIO0 Digital Input Port 0
-   (W) DIO1 Digital Output Port 1
-
-   0x07 TMRCTR (R/W) Timer/Counter Register
-   bits 6,7 - reserved
-   bits 5-3 - Timer frequency control (mantissa)
-   543  divisor  freqency (kHz)
-   000  1        600
-   001  10       60
-   010  2        300
-   011  3        200
-   100  4        150
-   101  5        120
-   110  6        100
-   111  12       50
-   bits 2-0 - Timer frequency control (exponent)
-   210  multiply divisor/divide frequency by
-   000  1
-   001  10
-   010  100
-   011  1000
-   100  10000
-   101  100000
-   110  1000000
-   111  10000000
+struct dt2811_private {
+       unsigned int ai_divisor;
+};
 
- */
+static unsigned int dt2811_ai_read_sample(struct comedi_device *dev,
+                                         struct comedi_subdevice *s)
+{
+       unsigned int val;
 
-#define TIMEOUT 10000
+       val = inb(dev->iobase + DT2811_ADDATA_LO_REG) |
+             (inb(dev->iobase + DT2811_ADDATA_HI_REG) << 8);
 
-#define DT2811_ADCSR 0
-#define DT2811_ADGCR 1
-#define DT2811_ADDATLO 2
-#define DT2811_ADDATHI 3
-#define DT2811_DADAT0LO 2
-#define DT2811_DADAT0HI 3
-#define DT2811_DADAT1LO 4
-#define DT2811_DADAT1HI 5
-#define DT2811_DIO 6
-#define DT2811_TMRCTR 7
+       return val & s->maxdata;
+}
 
-/*
- * flags
- */
+static irqreturn_t dt2811_interrupt(int irq, void *d)
+{
+       struct comedi_device *dev = d;
+       struct comedi_subdevice *s = dev->read_subdev;
+       struct comedi_async *async = s->async;
+       struct comedi_cmd *cmd = &async->cmd;
+       unsigned int status;
 
-/* ADCSR */
+       if (!dev->attached)
+               return IRQ_NONE;
 
-#define DT2811_ADDONE   0x80
-#define DT2811_ADERROR  0x40
-#define DT2811_ADBUSY   0x20
-#define DT2811_CLRERROR 0x10
-#define DT2811_INTENB   0x04
-#define DT2811_ADMODE   0x03
+       status = inb(dev->iobase + DT2811_ADCSR_REG);
 
-struct dt2811_board {
-       const char *name;
-       const struct comedi_lrange *bip_5;
-       const struct comedi_lrange *bip_2_5;
-       const struct comedi_lrange *unip_5;
-};
+       if (status & DT2811_ADCSR_ADERROR) {
+               async->events |= COMEDI_CB_OVERFLOW;
 
-enum { card_2811_pgh, card_2811_pgl };
+               outb(status | DT2811_ADCSR_CLRERROR,
+                    dev->iobase + DT2811_ADCSR_REG);
+       }
 
-struct dt2811_private {
-       int ntrig;
-       int curadchan;
-       enum {
-               adc_singleended, adc_diff, adc_pseudo_diff
-       } adc_mux;
-       enum {
-               dac_bipolar_5, dac_bipolar_2_5, dac_unipolar_5
-       } dac_range[2];
-       const struct comedi_lrange *range_type_list[2];
-};
+       if (status & DT2811_ADCSR_ADDONE) {
+               unsigned short val;
 
-static const struct comedi_lrange *dac_range_types[] = {
-       &range_bipolar5,
-       &range_bipolar2_5,
-       &range_unipolar5
-};
+               val = dt2811_ai_read_sample(dev, s);
+               comedi_buf_write_samples(s, &val, 1);
+       }
+
+       if (cmd->stop_src == TRIG_COUNT && async->scans_done >= cmd->stop_arg)
+               async->events |= COMEDI_CB_EOA;
+
+       comedi_handle_events(dev, s);
+
+       return IRQ_HANDLED;
+}
+
+static int dt2811_ai_cancel(struct comedi_device *dev,
+                           struct comedi_subdevice *s)
+{
+       /*
+        * Mode 0
+        * Single conversion
+        *
+        * Loading a chanspec will trigger a conversion.
+        */
+       outb(DT2811_ADCSR_ADMODE(0), dev->iobase + DT2811_ADCSR_REG);
+
+       return 0;
+}
+
+static void dt2811_ai_set_chanspec(struct comedi_device *dev,
+                                  unsigned int chanspec)
+{
+       unsigned int chan = CR_CHAN(chanspec);
+       unsigned int range = CR_RANGE(chanspec);
+
+       outb(DT2811_ADGCR_CHAN(chan) | DT2811_ADGCR_GAIN(range),
+            dev->iobase + DT2811_ADGCR_REG);
+}
+
+static int dt2811_ai_cmd(struct comedi_device *dev,
+                        struct comedi_subdevice *s)
+{
+       struct dt2811_private *devpriv = dev->private;
+       struct comedi_cmd *cmd = &s->async->cmd;
+       unsigned int mode;
+
+       if (cmd->start_src == TRIG_NOW) {
+               /*
+                * Mode 1
+                * Continuous conversion, internal trigger and clock
+                *
+                * This resets the trigger flip-flop, disabling A/D strobes.
+                * The timer/counter register is loaded with the division
+                * ratio which will give the required sample rate.
+                *
+                * Loading the first chanspec sets the trigger flip-flop,
+                * enabling the timer/counter. A/D strobes are then generated
+                * at the rate set by the internal clock/divider.
+                */
+               mode = DT2811_ADCSR_ADMODE(1);
+       } else { /* TRIG_EXT */
+               if (cmd->convert_src == TRIG_TIMER) {
+                       /*
+                        * Mode 2
+                        * Continuous conversion, external trigger
+                        *
+                        * Similar to Mode 1, with the exception that the
+                        * trigger flip-flop must be set by a negative edge
+                        * on the external trigger input.
+                        */
+                       mode = DT2811_ADCSR_ADMODE(2);
+               } else { /* TRIG_EXT */
+                       /*
+                        * Mode 3
+                        * Continuous conversion, external trigger, clock
+                        *
+                        * Similar to Mode 2, with the exception that the
+                        * conversion rate is set by the frequency on the
+                        * external clock/divider.
+                        */
+                       mode = DT2811_ADCSR_ADMODE(3);
+               }
+       }
+       outb(mode | DT2811_ADCSR_INTENB, dev->iobase + DT2811_ADCSR_REG);
+
+       /* load timer */
+       outb(devpriv->ai_divisor, dev->iobase + DT2811_TMRCTR_REG);
+
+       /* load chanspec - enables timer */
+       dt2811_ai_set_chanspec(dev, cmd->chanlist[0]);
+
+       return 0;
+}
+
+static unsigned int dt2811_ns_to_timer(unsigned int *nanosec,
+                                      unsigned int flags)
+{
+       unsigned long long ns = *nanosec;
+       unsigned int ns_lo = COMEDI_MIN_SPEED;
+       unsigned int ns_hi = 0;
+       unsigned int divisor_hi = 0;
+       unsigned int divisor_lo = 0;
+       unsigned int _div;
+       unsigned int _mult;
+
+       /*
+        * Work through all the divider/multiplier values to find the two
+        * closest divisors to generate the requested nanosecond timing.
+        */
+       for (_div = 0; _div <= 7; _div++) {
+               for (_mult = 0; _mult <= 7; _mult++) {
+                       unsigned int div = dt2811_clk_dividers[_div];
+                       unsigned int mult = dt2811_clk_multipliers[_mult];
+                       unsigned long long divider = div * mult;
+                       unsigned int divisor = DT2811_TMRCTR_MANTISSA(_div) |
+                                              DT2811_TMRCTR_EXPONENT(_mult);
+
+                       /*
+                        * The timer can be configured to run at a slowest
+                        * speed of 0.005 Hz (600 kHz / 120000000), which requires
+                        * 37-bits to represent the nanosecond value. Limit the
+                        * slowest timing to what comedi handles (32-bits).
+                        */
+                       ns = divider * DT2811_OSC_BASE;
+                       if (ns > COMEDI_MIN_SPEED)
+                               continue;
+
+                       /* Check for fastest found timing */
+                       if (ns <= *nanosec && ns > ns_hi) {
+                               ns_hi = ns;
+                               divisor_hi = divisor;
+                       }
+                       /* Check for slowest found timing */
+                       if (ns >= *nanosec && ns < ns_lo) {
+                               ns_lo = ns;
+                               divisor_lo = divisor;
+                       }
+               }
+       }
+
+       /*
+        * The slowest found timing will be invalid if the requested timing
+        * is faster than what can be generated by the timer. Fix it so that
+        * CMDF_ROUND_UP returns valid timing.
+        */
+       if (ns_lo == COMEDI_MIN_SPEED) {
+               ns_lo = ns_hi;
+               divisor_lo = divisor_hi;
+       }
+       /*
+        * The fastest found timing will be invalid if the requested timing
+        * is less than what can be generated by the timer. Fix it so that
+        * CMDF_ROUND_NEAREST and CMDF_ROUND_DOWN return valid timing.
+        */
+       if (ns_hi == 0) {
+               ns_hi = ns_lo;
+               divisor_hi = divisor_lo;
+       }
+
+       switch (flags & CMDF_ROUND_MASK) {
+       case CMDF_ROUND_NEAREST:
+       default:
+               if (ns_hi - *nanosec < *nanosec - ns_lo) {
+                       *nanosec = ns_lo;
+                       return divisor_lo;
+               }
+               *nanosec = ns_hi;
+               return divisor_hi;
+       case CMDF_ROUND_UP:
+               *nanosec = ns_lo;
+               return divisor_lo;
+       case CMDF_ROUND_DOWN:
+               *nanosec = ns_hi;
+               return divisor_hi;
+       }
+}
+
+static int dt2811_ai_cmdtest(struct comedi_device *dev,
+                            struct comedi_subdevice *s,
+                            struct comedi_cmd *cmd)
+{
+       struct dt2811_private *devpriv = dev->private;
+       unsigned int arg;
+       int err = 0;
+
+       /* Step 1 : check if triggers are trivially valid */
+
+       err |= comedi_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_EXT);
+       err |= comedi_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);
+       err |= comedi_check_trigger_src(&cmd->convert_src,
+                                       TRIG_TIMER | TRIG_EXT);
+       err |= comedi_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+       err |= comedi_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+       if (err)
+               return 1;
+
+       /* Step 2a : make sure trigger sources are unique */
+
+       err |= comedi_check_trigger_is_unique(cmd->start_src);
+       err |= comedi_check_trigger_is_unique(cmd->convert_src);
+       err |= comedi_check_trigger_is_unique(cmd->stop_src);
+
+       /* Step 2b : and mutually compatible */
+
+       if (cmd->convert_src == TRIG_EXT && cmd->start_src != TRIG_EXT)
+               err |= -EINVAL;
+
+       if (err)
+               return 2;
+
+       /* Step 3: check if arguments are trivially valid */
+
+       err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
+       err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+       if (cmd->convert_src == TRIG_TIMER)
+               err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 12500);
+       err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
+                                          cmd->chanlist_len);
+       if (cmd->stop_src == TRIG_COUNT)
+               err |= comedi_check_trigger_arg_min(&cmd->stop_arg, 1);
+       else    /* TRIG_NONE */
+               err |= comedi_check_trigger_arg_is(&cmd->stop_arg, 0);
+
+       if (err)
+               return 3;
+
+       /* Step 4: fix up any arguments */
+
+       if (cmd->convert_src == TRIG_TIMER) {
+               arg = cmd->convert_arg;
+               devpriv->ai_divisor = dt2811_ns_to_timer(&arg, cmd->flags);
+               err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+       } else { /* TRIG_EXT */
+               /* The convert_arg is used to set the divisor. */
+               devpriv->ai_divisor = cmd->convert_arg;
+       }
+
+       if (err)
+               return 4;
+
+       /* Step 5: check channel list if it exists */
+
+       return 0;
+}
 
 static int dt2811_ai_eoc(struct comedi_device *dev,
                         struct comedi_subdevice *s,
@@ -227,32 +472,33 @@ static int dt2811_ai_eoc(struct comedi_device *dev,
 {
        unsigned int status;
 
-       status = inb(dev->iobase + DT2811_ADCSR);
-       if ((status & DT2811_ADBUSY) == 0)
+       status = inb(dev->iobase + DT2811_ADCSR_REG);
+       if ((status & DT2811_ADCSR_ADBUSY) == 0)
                return 0;
        return -EBUSY;
 }
 
-static int dt2811_ai_insn(struct comedi_device *dev, struct comedi_subdevice *s,
-                         struct comedi_insn *insn, unsigned int *data)
+static int dt2811_ai_insn_read(struct comedi_device *dev,
+                              struct comedi_subdevice *s,
+                              struct comedi_insn *insn,
+                              unsigned int *data)
 {
-       int chan = CR_CHAN(insn->chanspec);
        int ret;
        int i;
 
+       /* We will already be in Mode 0 */
        for (i = 0; i < insn->n; i++) {
-               outb(chan, dev->iobase + DT2811_ADGCR);
+               /* load chanspec and trigger conversion */
+               dt2811_ai_set_chanspec(dev, insn->chanspec);
 
                ret = comedi_timeout(dev, s, insn, dt2811_ai_eoc, 0);
                if (ret)
                        return ret;
 
-               data[i] = inb(dev->iobase + DT2811_ADDATLO);
-               data[i] |= inb(dev->iobase + DT2811_ADDATHI) << 8;
-               data[i] &= 0xfff;
+               data[i] = dt2811_ai_read_sample(dev, s);
        }
 
-       return i;
+       return insn->n;
 }
 
 static int dt2811_ao_insn_write(struct comedi_device *dev,
@@ -266,9 +512,9 @@ static int dt2811_ao_insn_write(struct comedi_device *dev,
 
        for (i = 0; i < insn->n; i++) {
                val = data[i];
-               outb(val & 0xff, dev->iobase + DT2811_DADAT0LO + 2 * chan);
+               outb(val & 0xff, dev->iobase + DT2811_DADATA_LO_REG(chan));
                outb((val >> 8) & 0xff,
-                    dev->iobase + DT2811_DADAT0HI + 2 * chan);
+                    dev->iobase + DT2811_DADATA_HI_REG(chan));
        }
        s->readback[chan] = val;
 
@@ -277,9 +523,10 @@ static int dt2811_ao_insn_write(struct comedi_device *dev,
 
 static int dt2811_di_insn_bits(struct comedi_device *dev,
                               struct comedi_subdevice *s,
-                              struct comedi_insn *insn, unsigned int *data)
+                              struct comedi_insn *insn,
+                              unsigned int *data)
 {
-       data[1] = inb(dev->iobase + DT2811_DIO);
+       data[1] = inb(dev->iobase + DT2811_DI_REG);
 
        return insn->n;
 }
@@ -290,185 +537,118 @@ static int dt2811_do_insn_bits(struct comedi_device *dev,
                               unsigned int *data)
 {
        if (comedi_dio_update_state(s, data))
-               outb(s->state, dev->iobase + DT2811_DIO);
+               outb(s->state, dev->iobase + DT2811_DO_REG);
 
        data[1] = s->state;
 
        return insn->n;
 }
 
-/*
-  options[0]   Board base address
-  options[1]   IRQ
-  options[2]   Input configuration
-                0 == single-ended
-                1 == differential
-                2 == pseudo-differential
-  options[3]   Analog input range configuration
-                0 == bipolar 5  (-5V -- +5V)
-                1 == bipolar 2.5V  (-2.5V -- +2.5V)
-                2 == unipolar 5V  (0V -- +5V)
-  options[4]   Analog output 0 range configuration
-                0 == bipolar 5  (-5V -- +5V)
-                1 == bipolar 2.5V  (-2.5V -- +2.5V)
-                2 == unipolar 5V  (0V -- +5V)
-  options[5]   Analog output 1 range configuration
-                0 == bipolar 5  (-5V -- +5V)
-                1 == bipolar 2.5V  (-2.5V -- +2.5V)
-                2 == unipolar 5V  (0V -- +5V)
-*/
+static void dt2811_reset(struct comedi_device *dev)
+{
+       /* This is the initialization sequence from the users manual */
+       outb(DT2811_ADCSR_ADMODE(0), dev->iobase + DT2811_ADCSR_REG);
+       usleep_range(100, 1000);
+       inb(dev->iobase + DT2811_ADDATA_LO_REG);
+       inb(dev->iobase + DT2811_ADDATA_HI_REG);
+       outb(DT2811_ADCSR_ADMODE(0) | DT2811_ADCSR_CLRERROR,
+            dev->iobase + DT2811_ADCSR_REG);
+}
+
 static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
 {
-       /* int i; */
        const struct dt2811_board *board = dev->board_ptr;
        struct dt2811_private *devpriv;
-       int ret;
        struct comedi_subdevice *s;
+       int ret;
+
+       devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+       if (!devpriv)
+               return -ENOMEM;
 
        ret = comedi_request_region(dev, it->options[0], 0x8);
        if (ret)
                return ret;
 
-#if 0
-       outb(0, dev->iobase + DT2811_ADCSR);
-       udelay(100);
-       i = inb(dev->iobase + DT2811_ADDATLO);
-       i = inb(dev->iobase + DT2811_ADDATHI);
-#endif
+       dt2811_reset(dev);
+
+       /* IRQ's 2,3,5,7 are valid for async command support */
+       if (it->options[1] <= 7  && (BIT(it->options[1]) & 0xac)) {
+               ret = request_irq(it->options[1], dt2811_interrupt, 0,
+                                 dev->board_name, dev);
+               if (ret == 0)
+                       dev->irq = it->options[1];
+       }
 
        ret = comedi_alloc_subdevices(dev, 4);
        if (ret)
                return ret;
 
-       devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
-       if (!devpriv)
-               return -ENOMEM;
-
-       switch (it->options[2]) {
-       case 0:
-               devpriv->adc_mux = adc_singleended;
-               break;
-       case 1:
-               devpriv->adc_mux = adc_diff;
-               break;
-       case 2:
-               devpriv->adc_mux = adc_pseudo_diff;
-               break;
-       default:
-               devpriv->adc_mux = adc_singleended;
-               break;
-       }
-       switch (it->options[4]) {
-       case 0:
-               devpriv->dac_range[0] = dac_bipolar_5;
-               break;
-       case 1:
-               devpriv->dac_range[0] = dac_bipolar_2_5;
-               break;
-       case 2:
-               devpriv->dac_range[0] = dac_unipolar_5;
-               break;
-       default:
-               devpriv->dac_range[0] = dac_bipolar_5;
-               break;
-       }
-       switch (it->options[5]) {
-       case 0:
-               devpriv->dac_range[1] = dac_bipolar_5;
-               break;
-       case 1:
-               devpriv->dac_range[1] = dac_bipolar_2_5;
-               break;
-       case 2:
-               devpriv->dac_range[1] = dac_unipolar_5;
-               break;
-       default:
-               devpriv->dac_range[1] = dac_bipolar_5;
-               break;
-       }
-
+       /* Analog Input subdevice */
        s = &dev->subdevices[0];
-       /* initialize the ADC subdevice */
-       s->type = COMEDI_SUBD_AI;
-       s->subdev_flags = SDF_READABLE | SDF_GROUND;
-       s->n_chan = devpriv->adc_mux == adc_diff ? 8 : 16;
-       s->insn_read = dt2811_ai_insn;
-       s->maxdata = 0xfff;
-       switch (it->options[3]) {
-       case 0:
-       default:
-               s->range_table = board->bip_5;
-               break;
-       case 1:
-               s->range_table = board->bip_2_5;
-               break;
-       case 2:
-               s->range_table = board->unip_5;
-               break;
+       s->type         = COMEDI_SUBD_AI;
+       s->subdev_flags = SDF_READABLE |
+                         ((it->options[2] == 1) ? SDF_DIFF :
+                          (it->options[2] == 2) ? SDF_COMMON : SDF_GROUND);
+       s->n_chan       = (it->options[2] == 1) ? 8 : 16;
+       s->maxdata      = 0x0fff;
+       s->range_table  = board->is_pgh ? &dt2811_pgh_ai_ranges
+                                       : &dt2811_pgl_ai_ranges;
+       s->insn_read    = dt2811_ai_insn_read;
+       if (dev->irq) {
+               dev->read_subdev = s;
+               s->subdev_flags |= SDF_CMD_READ;
+               s->len_chanlist = 1;
+               s->do_cmdtest   = dt2811_ai_cmdtest;
+               s->do_cmd       = dt2811_ai_cmd;
+               s->cancel       = dt2811_ai_cancel;
        }
 
+       /* Analog Output subdevice */
        s = &dev->subdevices[1];
-       /* ao subdevice */
-       s->type = COMEDI_SUBD_AO;
-       s->subdev_flags = SDF_WRITABLE;
-       s->n_chan = 2;
-       s->maxdata = 0xfff;
-       s->range_table_list = devpriv->range_type_list;
-       devpriv->range_type_list[0] = dac_range_types[devpriv->dac_range[0]];
-       devpriv->range_type_list[1] = dac_range_types[devpriv->dac_range[1]];
-       s->insn_write = dt2811_ao_insn_write;
+       s->type         = COMEDI_SUBD_AO;
+       s->subdev_flags = SDF_WRITABLE;
+       s->n_chan       = 2;
+       s->maxdata      = 0x0fff;
+       s->range_table  = &dt2811_ao_ranges;
+       s->insn_write   = dt2811_ao_insn_write;
 
        ret = comedi_alloc_subdev_readback(s);
        if (ret)
                return ret;
 
+       /* Digital Input subdevice */
        s = &dev->subdevices[2];
-       /* di subdevice */
-       s->type = COMEDI_SUBD_DI;
-       s->subdev_flags = SDF_READABLE;
-       s->n_chan = 8;
-       s->insn_bits = dt2811_di_insn_bits;
-       s->maxdata = 1;
-       s->range_table = &range_digital;
-
+       s->type         = COMEDI_SUBD_DI;
+       s->subdev_flags = SDF_READABLE;
+       s->n_chan       = 8;
+       s->maxdata      = 1;
+       s->range_table  = &range_digital;
+       s->insn_bits    = dt2811_di_insn_bits;
+
+       /* Digital Output subdevice */
        s = &dev->subdevices[3];
-       /* do subdevice */
-       s->type = COMEDI_SUBD_DO;
-       s->subdev_flags = SDF_WRITABLE;
-       s->n_chan = 8;
-       s->insn_bits = dt2811_do_insn_bits;
-       s->maxdata = 1;
-       s->state = 0;
-       s->range_table = &range_digital;
+       s->type         = COMEDI_SUBD_DO;
+       s->subdev_flags = SDF_WRITABLE;
+       s->n_chan       = 8;
+       s->maxdata      = 1;
+       s->range_table  = &range_digital;
+       s->insn_bits    = dt2811_do_insn_bits;
 
        return 0;
 }
 
-static const struct dt2811_board boardtypes[] = {
-       {
-               .name           = "dt2811-pgh",
-               .bip_5          = &range_dt2811_pgh_ai_5_bipolar,
-               .bip_2_5        = &range_dt2811_pgh_ai_2_5_bipolar,
-               .unip_5         = &range_dt2811_pgh_ai_5_unipolar,
-       }, {
-               .name           = "dt2811-pgl",
-               .bip_5          = &range_dt2811_pgl_ai_5_bipolar,
-               .bip_2_5        = &range_dt2811_pgl_ai_2_5_bipolar,
-               .unip_5         = &range_dt2811_pgl_ai_5_unipolar,
-       },
-};
-
 static struct comedi_driver dt2811_driver = {
        .driver_name    = "dt2811",
        .module         = THIS_MODULE,
        .attach         = dt2811_attach,
        .detach         = comedi_legacy_detach,
-       .board_name     = &boardtypes[0].name,
-       .num_names      = ARRAY_SIZE(boardtypes),
+       .board_name     = &dt2811_boards[0].name,
+       .num_names      = ARRAY_SIZE(dt2811_boards),
        .offset         = sizeof(struct dt2811_board),
 };
 module_comedi_driver(dt2811_driver);
 
 MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for Data Translation DT2811 series boards");
 MODULE_LICENSE("GPL");
index 66705f9a0621f928488f4f450fa1d00ed2df33c5..2f903bedcefa96119a1c05323b81149d7f12042c 100644 (file)
@@ -1,38 +1,38 @@
 /*
   comedi/drivers/dt2814.c
   Hardware driver for Data Translation DT2814
-
   COMEDI - Linux Control and Measurement Device Interface
   Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
-
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-*/
* comedi/drivers/dt2814.c
* Hardware driver for Data Translation DT2814
+ *
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
+ */
 /*
-Driver: dt2814
-Description: Data Translation DT2814
-Author: ds
-Status: complete
-Devices: [Data Translation] DT2814 (dt2814)
-
-Configuration options:
-  [0] - I/O port base address
-  [1] - IRQ
-
-This card has 16 analog inputs multiplexed onto a 12 bit ADC.  There
-is a minimally useful onboard clock.  The base frequency for the
-clock is selected by jumpers, and the clock divider can be selected
-via programmed I/O.  Unfortunately, the clock divider can only be
-a power of 10, from 1 to 10^7, of which only 3 or 4 are useful.  In
-addition, the clock does not seem to be very accurate.
-*/
+ * Driver: dt2814
+ * Description: Data Translation DT2814
+ * Author: ds
+ * Status: complete
+ * Devices: [Data Translation] DT2814 (dt2814)
+ *
+ * Configuration options:
* [0] - I/O port base address
* [1] - IRQ
+ *
+ * This card has 16 analog inputs multiplexed onto a 12 bit ADC.  There
+ * is a minimally useful onboard clock.  The base frequency for the
+ * clock is selected by jumpers, and the clock divider can be selected
+ * via programmed I/O.  Unfortunately, the clock divider can only be
+ * a power of 10, from 1 to 10^7, of which only 3 or 4 are useful.  In
+ * addition, the clock does not seem to be very accurate.
+ */
 
 #include <linux/module.h>
 #include <linux/interrupt.h>
@@ -215,8 +215,10 @@ static irqreturn_t dt2814_interrupt(int irq, void *d)
                int i;
 
                outb(0, dev->iobase + DT2814_CSR);
-               /* note: turning off timed mode triggers another
-                  sample. */
+               /*
+                * note: turning off timed mode triggers another
+                * sample.
+                */
 
                for (i = 0; i < DT2814_TIMEOUT; i++) {
                        if (inb(dev->iobase + DT2814_CSR) & DT2814_FINISH)
index fb08569c1ac1fd4b582f7b46d7599f8010c95e30..0be77cc40a79c67230dae2a25addd5f9896e7977 100644 (file)
@@ -1,55 +1,55 @@
 /*
  comedi/drivers/dt2815.c
  Hardware driver for Data Translation DT2815
-
  COMEDI - Linux Control and Measurement Device Interface
  Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
-
  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.
-
  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.
* comedi/drivers/dt2815.c
* Hardware driver for Data Translation DT2815
+ *
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
  */
 /*
-Driver: dt2815
-Description: Data Translation DT2815
-Author: ds
-Status: mostly complete, untested
-Devices: [Data Translation] DT2815 (dt2815)
-
-I'm not sure anyone has ever tested this board.  If you have information
-contrary, please update.
-
-Configuration options:
-  [0] - I/O port base base address
-  [1] - IRQ (unused)
-  [2] - Voltage unipolar/bipolar configuration
      0 == unipolar 5V  (0V -- +5V)
      1 == bipolar 5V  (-5V -- +5V)
-  [3] - Current offset configuration
      0 == disabled  (0mA -- +32mAV)
      1 == enabled  (+4mA -- +20mAV)
-  [4] - Firmware program configuration
      0 == program 1 (see manual table 5-4)
      1 == program 2 (see manual table 5-4)
      2 == program 3 (see manual table 5-4)
      3 == program 4 (see manual table 5-4)
-  [5] - Analog output 0 range configuration
      0 == voltage
      1 == current
-  [6] - Analog output 1 range configuration (same options)
-  [7] - Analog output 2 range configuration (same options)
-  [8] - Analog output 3 range configuration (same options)
-  [9] - Analog output 4 range configuration (same options)
-  [10] - Analog output 5 range configuration (same options)
-  [11] - Analog output 6 range configuration (same options)
-  [12] - Analog output 7 range configuration (same options)
-*/
+ * Driver: dt2815
+ * Description: Data Translation DT2815
+ * Author: ds
+ * Status: mostly complete, untested
+ * Devices: [Data Translation] DT2815 (dt2815)
+ *
+ * I'm not sure anyone has ever tested this board.  If you have information
+ * contrary, please update.
+ *
+ * Configuration options:
* [0] - I/O port base base address
* [1] - IRQ (unused)
* [2] - Voltage unipolar/bipolar configuration
*     0 == unipolar 5V  (0V -- +5V)
*     1 == bipolar 5V  (-5V -- +5V)
* [3] - Current offset configuration
*     0 == disabled  (0mA -- +32mAV)
*     1 == enabled  (+4mA -- +20mAV)
* [4] - Firmware program configuration
*     0 == program 1 (see manual table 5-4)
*     1 == program 2 (see manual table 5-4)
*     2 == program 3 (see manual table 5-4)
*     3 == program 4 (see manual table 5-4)
* [5] - Analog output 0 range configuration
*     0 == voltage
*     1 == current
* [6] - Analog output 1 range configuration (same options)
* [7] - Analog output 2 range configuration (same options)
* [8] - Analog output 3 range configuration (same options)
* [9] - Analog output 4 range configuration (same options)
* [10] - Analog output 5 range configuration (same options)
* [11] - Analog output 6 range configuration (same options)
* [12] - Analog output 7 range configuration (same options)
+ */
 
 #include <linux/module.h>
 #include "../comedidev.h"
@@ -120,27 +120,27 @@ static int dt2815_ao_insn(struct comedi_device *dev, struct comedi_subdevice *s,
 }
 
 /*
-  options[0]   Board base address
-  options[1]   IRQ (not applicable)
-  options[2]   Voltage unipolar/bipolar configuration
              0 == unipolar 5V  (0V -- +5V)
              1 == bipolar 5V  (-5V -- +5V)
-  options[3]   Current offset configuration
              0 == disabled  (0mA -- +32mAV)
              1 == enabled  (+4mA -- +20mAV)
-  options[4]   Firmware program configuration
              0 == program 1 (see manual table 5-4)
              1 == program 2 (see manual table 5-4)
              2 == program 3 (see manual table 5-4)
              3 == program 4 (see manual table 5-4)
-  options[5]   Analog output 0 range configuration
              0 == voltage
              1 == current
-  options[6]   Analog output 1 range configuration
-  ...
-  options[12]   Analog output 7 range configuration
              0 == voltage
              1 == current
* options[0]   Board base address
* options[1]   IRQ (not applicable)
* options[2]   Voltage unipolar/bipolar configuration
*             0 == unipolar 5V  (0V -- +5V)
*             1 == bipolar 5V  (-5V -- +5V)
* options[3]   Current offset configuration
*             0 == disabled  (0mA -- +32mAV)
*             1 == enabled  (+4mA -- +20mAV)
* options[4]   Firmware program configuration
*             0 == program 1 (see manual table 5-4)
*             1 == program 2 (see manual table 5-4)
*             2 == program 3 (see manual table 5-4)
*             3 == program 4 (see manual table 5-4)
* options[5]   Analog output 0 range configuration
*             0 == voltage
*             1 == current
* options[6]   Analog output 1 range configuration
* ...
* options[12]   Analog output 7 range configuration
*             0 == voltage
*             1 == current
  */
 
 static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it)
index 5131deebf66fec8108506f19fd99e1727754c7a0..39d2566e49bf1b05123f958f1a377c88437e0c81 100644 (file)
@@ -1,37 +1,37 @@
 /*
   comedi/drivers/dt2817.c
   Hardware driver for Data Translation DT2817
-
   COMEDI - Linux Control and Measurement Device Interface
   Copyright (C) 1998 David A. Schleef <ds@schleef.org>
-
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.
-
   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.
-*/
* comedi/drivers/dt2817.c
* Hardware driver for Data Translation DT2817
+ *
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
+ */
 /*
-Driver: dt2817
-Description: Data Translation DT2817
-Author: ds
-Status: complete
-Devices: [Data Translation] DT2817 (dt2817)
-
-A very simple digital I/O card.  Four banks of 8 lines, each bank
-is configurable for input or output.  One wonders why it takes a
-50 page manual to describe this thing.
-
-The driver (which, btw, is much less than 50 pages) has 1 subdevice
-with 32 channels, configurable in groups of 8.
-
-Configuration options:
-  [0] - I/O port base base address
-*/
+ * Driver: dt2817
+ * Description: Data Translation DT2817
+ * Author: ds
+ * Status: complete
+ * Devices: [Data Translation] DT2817 (dt2817)
+ *
+ * A very simple digital I/O card.  Four banks of 8 lines, each bank
+ * is configurable for input or output.  One wonders why it takes a
+ * 50 page manual to describe this thing.
+ *
+ * The driver (which, btw, is much less than 50 pages) has 1 subdevice
+ * with 32 channels, configurable in groups of 8.
+ *
+ * Configuration options:
* [0] - I/O port base base address
+ */
 
 #include <linux/module.h>
 #include "../comedidev.h"
index 63b5cbc44bda7755e8b1d6d0bc2577c2f8ebf931..af4b4175af4daecf864bb68f2aeb2243d8f160ab 100644 (file)
@@ -158,10 +158,7 @@ static void gsc_hpdi_drain_dma(struct comedi_device *dev, unsigned int channel)
        unsigned int size;
        unsigned int next;
 
-       if (channel)
-               next = readl(devpriv->plx9080_mmio + PLX_DMA1_PCI_ADDRESS_REG);
-       else
-               next = readl(devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
+       next = readl(devpriv->plx9080_mmio + PLX_REG_DMAPADR(channel));
 
        idx = devpriv->dma_desc_index;
        start = le32_to_cpu(devpriv->dma_desc[idx].pci_start_addr);
@@ -201,8 +198,9 @@ static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
        if (!dev->attached)
                return IRQ_NONE;
 
-       plx_status = readl(devpriv->plx9080_mmio + PLX_INTRCS_REG);
-       if ((plx_status & (ICS_DMA0_A | ICS_DMA1_A | ICS_LIA)) == 0)
+       plx_status = readl(devpriv->plx9080_mmio + PLX_REG_INTCSR);
+       if ((plx_status &
+            (PLX_INTCSR_DMA0IA | PLX_INTCSR_DMA1IA | PLX_INTCSR_PLIA)) == 0)
                return IRQ_NONE;
 
        hpdi_intr_status = readl(dev->mmio + INTERRUPT_STATUS_REG);
@@ -213,32 +211,32 @@ static irqreturn_t gsc_hpdi_interrupt(int irq, void *d)
 
        /* spin lock makes sure no one else changes plx dma control reg */
        spin_lock_irqsave(&dev->spinlock, flags);
-       dma0_status = readb(devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
-       if (plx_status & ICS_DMA0_A) {
+       dma0_status = readb(devpriv->plx9080_mmio + PLX_REG_DMACSR0);
+       if (plx_status & PLX_INTCSR_DMA0IA) {
                /* dma chan 0 interrupt */
-               writeb((dma0_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
-                      devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+               writeb((dma0_status & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
+                      devpriv->plx9080_mmio + PLX_REG_DMACSR0);
 
-               if (dma0_status & PLX_DMA_EN_BIT)
+               if (dma0_status & PLX_DMACSR_ENABLE)
                        gsc_hpdi_drain_dma(dev, 0);
        }
        spin_unlock_irqrestore(&dev->spinlock, flags);
 
        /* spin lock makes sure no one else changes plx dma control reg */
        spin_lock_irqsave(&dev->spinlock, flags);
-       dma1_status = readb(devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
-       if (plx_status & ICS_DMA1_A) {
+       dma1_status = readb(devpriv->plx9080_mmio + PLX_REG_DMACSR1);
+       if (plx_status & PLX_INTCSR_DMA1IA) {
                /* XXX */ /* dma chan 1 interrupt */
-               writeb((dma1_status & PLX_DMA_EN_BIT) | PLX_CLEAR_DMA_INTR_BIT,
-                      devpriv->plx9080_mmio + PLX_DMA1_CS_REG);
+               writeb((dma1_status & PLX_DMACSR_ENABLE) | PLX_DMACSR_CLEARINTR,
+                      devpriv->plx9080_mmio + PLX_REG_DMACSR1);
        }
        spin_unlock_irqrestore(&dev->spinlock, flags);
 
        /* clear possible plx9080 interrupt sources */
-       if (plx_status & ICS_LDIA) {
+       if (plx_status & PLX_INTCSR_LDBIA) {
                /* clear local doorbell interrupt */
-               plx_bits = readl(devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
-               writel(plx_bits, devpriv->plx9080_mmio + PLX_DBR_OUT_REG);
+               plx_bits = readl(devpriv->plx9080_mmio + PLX_REG_L2PDBELL);
+               writel(plx_bits, devpriv->plx9080_mmio + PLX_REG_L2PDBELL);
        }
 
        if (hpdi_board_status & RX_OVERRUN_BIT) {
@@ -307,19 +305,19 @@ static int gsc_hpdi_cmd(struct comedi_device *dev,
         * occasionally cause problems with transfer of first dma
         * block.  Initializing them to zero seems to fix the problem.
         */
-       writel(0, devpriv->plx9080_mmio + PLX_DMA0_TRANSFER_SIZE_REG);
-       writel(0, devpriv->plx9080_mmio + PLX_DMA0_PCI_ADDRESS_REG);
-       writel(0, devpriv->plx9080_mmio + PLX_DMA0_LOCAL_ADDRESS_REG);
+       writel(0, devpriv->plx9080_mmio + PLX_REG_DMASIZ0);
+       writel(0, devpriv->plx9080_mmio + PLX_REG_DMAPADR0);
+       writel(0, devpriv->plx9080_mmio + PLX_REG_DMALADR0);
 
        /* give location of first dma descriptor */
-       bits = devpriv->dma_desc_phys_addr | PLX_DESC_IN_PCI_BIT |
-              PLX_INTR_TERM_COUNT | PLX_XFER_LOCAL_TO_PCI;
-       writel(bits, devpriv->plx9080_mmio + PLX_DMA0_DESCRIPTOR_REG);
+       bits = devpriv->dma_desc_phys_addr | PLX_DMADPR_DESCPCI |
+              PLX_DMADPR_TCINTR | PLX_DMADPR_XFERL2P;
+       writel(bits, devpriv->plx9080_mmio + PLX_REG_DMADPR0);
 
        /* enable dma transfer */
        spin_lock_irqsave(&dev->spinlock, flags);
-       writeb(PLX_DMA_EN_BIT | PLX_DMA_START_BIT | PLX_CLEAR_DMA_INTR_BIT,
-              devpriv->plx9080_mmio + PLX_DMA0_CS_REG);
+       writeb(PLX_DMACSR_ENABLE | PLX_DMACSR_START | PLX_DMACSR_CLEARINTR,
+              devpriv->plx9080_mmio + PLX_REG_DMACSR0);
        spin_unlock_irqrestore(&dev->spinlock, flags);
 
        if (cmd->stop_src == TRIG_COUNT)
@@ -424,8 +422,8 @@ static int gsc_hpdi_setup_dma_descriptors(struct comedi_device *dev,
 {
        struct hpdi_private *devpriv = dev->private;
        dma_addr_t phys_addr = devpriv->dma_desc_phys_addr;
-       u32 next_bits = PLX_DESC_IN_PCI_BIT | PLX_INTR_TERM_COUNT |
-                       PLX_XFER_LOCAL_TO_PCI;
+       u32 next_bits = PLX_DMADPR_DESCPCI | PLX_DMADPR_TCINTR |
+                       PLX_DMADPR_XFERL2P;
        unsigned int offset = 0;
        unsigned int idx = 0;
        unsigned int i;
@@ -536,9 +534,10 @@ static int gsc_hpdi_init(struct comedi_device *dev)
 
        /* enable interrupts */
        plx_intcsr_bits =
-           ICS_AERR | ICS_PERR | ICS_PIE | ICS_PLIE | ICS_PAIE | ICS_LIE |
-           ICS_DMA0_E;
-       writel(plx_intcsr_bits, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+           PLX_INTCSR_LSEABORTEN | PLX_INTCSR_LSEPARITYEN | PLX_INTCSR_PIEN |
+           PLX_INTCSR_PLIEN | PLX_INTCSR_PABORTIEN | PLX_INTCSR_LIOEN |
+           PLX_INTCSR_DMA0IEN;
+       writel(plx_intcsr_bits, devpriv->plx9080_mmio + PLX_REG_INTCSR);
 
        return 0;
 }
@@ -550,13 +549,13 @@ static void gsc_hpdi_init_plx9080(struct comedi_device *dev)
        void __iomem *plx_iobase = devpriv->plx9080_mmio;
 
 #ifdef __BIG_ENDIAN
-       bits = BIGEND_DMA0 | BIGEND_DMA1;
+       bits = PLX_BIGEND_DMA0 | PLX_BIGEND_DMA1;
 #else
        bits = 0;
 #endif
-       writel(bits, devpriv->plx9080_mmio + PLX_BIGEND_REG);
+       writel(bits, devpriv->plx9080_mmio + PLX_REG_BIGEND);
 
-       writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+       writel(0, devpriv->plx9080_mmio + PLX_REG_INTCSR);
 
        gsc_hpdi_abort_dma(dev, 0);
        gsc_hpdi_abort_dma(dev, 1);
@@ -564,27 +563,27 @@ static void gsc_hpdi_init_plx9080(struct comedi_device *dev)
        /* configure dma0 mode */
        bits = 0;
        /* enable ready input */
-       bits |= PLX_DMA_EN_READYIN_BIT;
+       bits |= PLX_DMAMODE_READYIEN;
        /* enable dma chaining */
-       bits |= PLX_EN_CHAIN_BIT;
+       bits |= PLX_DMAMODE_CHAINEN;
        /*
         * enable interrupt on dma done
         * (probably don't need this, since chain never finishes)
         */
-       bits |= PLX_EN_DMA_DONE_INTR_BIT;
+       bits |= PLX_DMAMODE_DONEIEN;
        /*
         * don't increment local address during transfers
         * (we are transferring from a fixed fifo register)
         */
-       bits |= PLX_LOCAL_ADDR_CONST_BIT;
+       bits |= PLX_DMAMODE_LACONST;
        /* route dma interrupt to pci bus */
-       bits |= PLX_DMA_INTR_PCI_BIT;
+       bits |= PLX_DMAMODE_INTRPCI;
        /* enable demand mode */
-       bits |= PLX_DEMAND_MODE_BIT;
+       bits |= PLX_DMAMODE_DEMAND;
        /* enable local burst mode */
-       bits |= PLX_DMA_LOCAL_BURST_EN_BIT;
-       bits |= PLX_LOCAL_BUS_32_WIDE_BITS;
-       writel(bits, plx_iobase + PLX_DMA0_MODE_REG);
+       bits |= PLX_DMAMODE_BURSTEN;
+       bits |= PLX_DMAMODE_WIDTH32;
+       writel(bits, plx_iobase + PLX_REG_DMAMODE0);
 }
 
 static int gsc_hpdi_auto_attach(struct comedi_device *dev,
@@ -680,7 +679,7 @@ static void gsc_hpdi_detach(struct comedi_device *dev)
                free_irq(dev->irq, dev);
        if (devpriv) {
                if (devpriv->plx9080_mmio) {
-                       writel(0, devpriv->plx9080_mmio + PLX_INTRCS_REG);
+                       writel(0, devpriv->plx9080_mmio + PLX_REG_INTCSR);
                        iounmap(devpriv->plx9080_mmio);
                }
                if (dev->mmio)
index b87192e0f9aa8ba93217aef26fb16b031cc1e469..6c4ff023717f0f80de0d153ceb879d5207124a89 100644 (file)
@@ -1,20 +1,20 @@
 /*
-  comedi/drivers/jr3_pci.c
-  hardware driver for JR3/PCI force sensor board
-
-  COMEDI - Linux Control and Measurement Device Interface
-  Copyright (C) 2007 Anders Blomdell <anders.blomdell@control.lth.se>
-
-  This program is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 2 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-*/
* comedi/drivers/jr3_pci.c
* hardware driver for JR3/PCI force sensor board
+ *
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 2007 Anders Blomdell <anders.blomdell@control.lth.se>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
* GNU General Public License for more details.
+ */
 /*
  * Driver: jr3_pci
  * Description: JR3/PCI force sensor board
@@ -231,7 +231,7 @@ static unsigned int jr3_pci_ai_read_chan(struct comedi_device *dev,
 
        if (chan < 56) {
                unsigned int axis = chan % 8;
-               unsigned filter = chan / 8;
+               unsigned int filter = chan / 8;
 
                switch (axis) {
                case 0:
@@ -690,7 +690,7 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
        if (sizeof(struct jr3_channel) != 0xc00) {
                dev_err(dev->class_dev,
                        "sizeof(struct jr3_channel) = %x [expected %x]\n",
-                       (unsigned)sizeof(struct jr3_channel), 0xc00);
+                       (unsigned int)sizeof(struct jr3_channel), 0xc00);
                return -EINVAL;
        }
 
index 3bf0caa18ab04a7af4046a74deba09252b82dbca..c0b7a300e4288940c692eb2f96f56e073f190e19 100644 (file)
@@ -150,7 +150,7 @@ struct me_private_data {
        unsigned short dac_ctrl;        /* Mirror of the DAC_CONTROL register */
 };
 
-static inline void sleep(unsigned sec)
+static inline void sleep(unsigned int sec)
 {
        schedule_timeout_interruptible(sec * HZ);
 }
index 826e4399c87ee0e2dc28835038f80c1640fa2771..9bda761433c2b5065850af066bb6b8014433ce83 100644 (file)
@@ -103,7 +103,7 @@ static const struct comedi_lrange range_mpc624_bipolar1 = {
 /* BIP_RANGE(1.01)  this is correct, */
         /*  but my MPC-624 actually seems to have a range of 2.02 */
         BIP_RANGE(2.02)
-        }
+       }
 };
 
 static const struct comedi_lrange range_mpc624_bipolar10 = {
@@ -112,7 +112,7 @@ static const struct comedi_lrange range_mpc624_bipolar10 = {
 /* BIP_RANGE(10.1)   this is correct, */
         /*  but my MPC-624 actually seems to have a range of 20.2 */
         BIP_RANGE(20.2)
-        }
+       }
 };
 
 static unsigned int mpc624_ai_get_sample(struct comedi_device *dev,
index 251117be1205fc0b7401d71dcd083236a8b87917..07f38e38546911c06981745120d980213eec3a83 100644 (file)
@@ -151,10 +151,10 @@ enum ni_65xx_boardid {
 
 struct ni_65xx_board {
        const char *name;
-       unsigned num_dio_ports;
-       unsigned num_di_ports;
-       unsigned num_do_ports;
-       unsigned legacy_invert:1;
+       unsigned int num_dio_ports;
+       unsigned int num_di_ports;
+       unsigned int num_do_ports;
+       unsigned int legacy_invert:1;
 };
 
 static const struct ni_65xx_board ni_65xx_boards[] = {
@@ -360,7 +360,7 @@ static int ni_65xx_dio_insn_config(struct comedi_device *dev,
        unsigned long base_port = (unsigned long)s->private;
        unsigned int chan = CR_CHAN(insn->chanspec);
        unsigned int chan_mask = NI_65XX_CHAN_TO_MASK(chan);
-       unsigned port = base_port + NI_65XX_CHAN_TO_PORT(chan);
+       unsigned int port = base_port + NI_65XX_CHAN_TO_PORT(chan);
        unsigned int interval;
        unsigned int val;
 
@@ -428,14 +428,14 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev,
        unsigned long base_port = (unsigned long)s->private;
        unsigned int base_chan = CR_CHAN(insn->chanspec);
        int last_port_offset = NI_65XX_CHAN_TO_PORT(s->n_chan - 1);
-       unsigned read_bits = 0;
+       unsigned int read_bits = 0;
        int port_offset;
 
        for (port_offset = NI_65XX_CHAN_TO_PORT(base_chan);
             port_offset <= last_port_offset; port_offset++) {
-               unsigned port = base_port + port_offset;
+               unsigned int port = base_port + port_offset;
                int base_port_channel = NI_65XX_PORT_TO_CHAN(port_offset);
-               unsigned port_mask, port_data, bits;
+               unsigned int port_mask, port_data, bits;
                int bitshift = base_port_channel - base_chan;
 
                if (bitshift >= 32)
@@ -640,7 +640,7 @@ static int ni_65xx_auto_attach(struct comedi_device *dev,
        struct pci_dev *pcidev = comedi_to_pci_dev(dev);
        const struct ni_65xx_board *board = NULL;
        struct comedi_subdevice *s;
-       unsigned i;
+       unsigned int i;
        int ret;
 
        if (context < ARRAY_SIZE(ni_65xx_boards))
index 02a53299097994cbaf90f258aa98b2fa6f2b1fc8..35ef1925703f75abdd42e7078f16773f3634f3a9 100644 (file)
@@ -170,12 +170,12 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
 #define DMA_Line_Control_Group1                76
 #define DMA_Line_Control_Group2                108
 /* channel zero is none */
-static inline unsigned primary_DMAChannel_bits(unsigned channel)
+static inline unsigned int primary_DMAChannel_bits(unsigned int channel)
 {
        return channel & 0x3;
 }
 
-static inline unsigned secondary_DMAChannel_bits(unsigned channel)
+static inline unsigned int secondary_DMAChannel_bits(unsigned int channel)
 {
        return (channel << 2) & 0xc;
 }
index 344aa343e5e199e216ef88207ae771b19d717ee4..d8917392b9f9165aae78707acd04fc600711dcbe 100644 (file)
@@ -1064,12 +1064,12 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
        struct mite *mite = devpriv->mite;
        resource_size_t daq_phys_addr;
        static const int Start_Cal_EEPROM = 0x400;
-       static const unsigned window_size = 10;
+       static const unsigned int window_size = 10;
        static const int serial_number_eeprom_offset = 0x4;
        static const int serial_number_eeprom_length = 0x4;
-       unsigned old_iodwbsr_bits;
-       unsigned old_iodwbsr1_bits;
-       unsigned old_iodwcr1_bits;
+       unsigned int old_iodwbsr_bits;
+       unsigned int old_iodwbsr1_bits;
+       unsigned int old_iodwcr1_bits;
        int i;
 
        /* IO Window 1 needs to be temporarily mapped to read the eeprom */
index 10472e6dd002e1c03e4b3919de551829c4233480..70ad497dd20b8ce7bd08d88d5e56e5f2cf37b215 100644 (file)
 #define PCMMIO_AI_LSB_REG                      0x00
 #define PCMMIO_AI_MSB_REG                      0x01
 #define PCMMIO_AI_CMD_REG                      0x02
-#define PCMMIO_AI_CMD_SE                       (1 << 7)
-#define PCMMIO_AI_CMD_ODD_CHAN                 (1 << 6)
+#define PCMMIO_AI_CMD_SE                       BIT(7)
+#define PCMMIO_AI_CMD_ODD_CHAN                 BIT(6)
 #define PCMMIO_AI_CMD_CHAN_SEL(x)              (((x) & 0x3) << 4)
 #define PCMMIO_AI_CMD_RANGE(x)                 (((x) & 0x3) << 2)
 #define PCMMIO_RESOURCE_REG                    0x02
 #define PCMMIO_RESOURCE_IRQ(x)                 (((x) & 0xf) << 0)
 #define PCMMIO_AI_STATUS_REG                   0x03
-#define PCMMIO_AI_STATUS_DATA_READY            (1 << 7)
-#define PCMMIO_AI_STATUS_DATA_DMA_PEND         (1 << 6)
-#define PCMMIO_AI_STATUS_CMD_DMA_PEND          (1 << 5)
-#define PCMMIO_AI_STATUS_IRQ_PEND              (1 << 4)
-#define PCMMIO_AI_STATUS_DATA_DRQ_ENA          (1 << 2)
-#define PCMMIO_AI_STATUS_REG_SEL               (1 << 3)
-#define PCMMIO_AI_STATUS_CMD_DRQ_ENA           (1 << 1)
-#define PCMMIO_AI_STATUS_IRQ_ENA               (1 << 0)
+#define PCMMIO_AI_STATUS_DATA_READY            BIT(7)
+#define PCMMIO_AI_STATUS_DATA_DMA_PEND         BIT(6)
+#define PCMMIO_AI_STATUS_CMD_DMA_PEND          BIT(5)
+#define PCMMIO_AI_STATUS_IRQ_PEND              BIT(4)
+#define PCMMIO_AI_STATUS_DATA_DRQ_ENA          BIT(2)
+#define PCMMIO_AI_STATUS_REG_SEL               BIT(3)
+#define PCMMIO_AI_STATUS_CMD_DRQ_ENA           BIT(1)
+#define PCMMIO_AI_STATUS_IRQ_ENA               BIT(0)
 #define PCMMIO_AI_RES_ENA_REG                  0x03
 #define PCMMIO_AI_RES_ENA_CMD_REG_ACCESS       (0 << 3)
-#define PCMMIO_AI_RES_ENA_AI_RES_ACCESS                (1 << 3)
-#define PCMMIO_AI_RES_ENA_DIO_RES_ACCESS       (1 << 4)
+#define PCMMIO_AI_RES_ENA_AI_RES_ACCESS                BIT(3)
+#define PCMMIO_AI_RES_ENA_DIO_RES_ACCESS       BIT(4)
 #define PCMMIO_AI_2ND_ADC_OFFSET               0x04
 
 #define PCMMIO_AO_LSB_REG                      0x08
 #define PCMMIO_AO_CMD_CHAN_SEL(x)              (((x) & 0x03) << 1)
 #define PCMMIO_AO_CMD_CHAN_SEL_ALL             (0x0f << 0)
 #define PCMMIO_AO_STATUS_REG                   0x0b
-#define PCMMIO_AO_STATUS_DATA_READY            (1 << 7)
-#define PCMMIO_AO_STATUS_DATA_DMA_PEND         (1 << 6)
-#define PCMMIO_AO_STATUS_CMD_DMA_PEND          (1 << 5)
-#define PCMMIO_AO_STATUS_IRQ_PEND              (1 << 4)
-#define PCMMIO_AO_STATUS_DATA_DRQ_ENA          (1 << 2)
-#define PCMMIO_AO_STATUS_REG_SEL               (1 << 3)
-#define PCMMIO_AO_STATUS_CMD_DRQ_ENA           (1 << 1)
-#define PCMMIO_AO_STATUS_IRQ_ENA               (1 << 0)
+#define PCMMIO_AO_STATUS_DATA_READY            BIT(7)
+#define PCMMIO_AO_STATUS_DATA_DMA_PEND         BIT(6)
+#define PCMMIO_AO_STATUS_CMD_DMA_PEND          BIT(5)
+#define PCMMIO_AO_STATUS_IRQ_PEND              BIT(4)
+#define PCMMIO_AO_STATUS_DATA_DRQ_ENA          BIT(2)
+#define PCMMIO_AO_STATUS_REG_SEL               BIT(3)
+#define PCMMIO_AO_STATUS_CMD_DRQ_ENA           BIT(1)
+#define PCMMIO_AO_STATUS_IRQ_ENA               BIT(0)
 #define PCMMIO_AO_RESOURCE_ENA_REG             0x0b
 #define PCMMIO_AO_2ND_DAC_OFFSET               0x04
 
index 7ea813022ff6f86513f85fc7e8ca54bc5fe77c65..8ad64f2625fe48eb6642143956501657ba00e9ac 100644 (file)
@@ -307,7 +307,7 @@ static void pcmuio_stop_intr(struct comedi_device *dev,
 
 static void pcmuio_handle_intr_subdev(struct comedi_device *dev,
                                      struct comedi_subdevice *s,
-                                     unsigned triggered)
+                                     unsigned int triggered)
 {
        struct pcmuio_private *devpriv = dev->private;
        int asic = pcmuio_subdevice_to_asic(s);
index 8d1aee00b19fc2dba635d4c946e842f1d22e4f19..0e20cc5c9a693b4c793546ba6530826ba86e8227 100644 (file)
@@ -3,15 +3,6 @@
  *
  * Copyright (C) 2002,2003 Frank Mori Hess <fmhess@users.sourceforge.net>
  *
- * I modified this file from the plx9060.h header for the
- * wanXL device driver in the linux kernel,
- * for the register offsets and bit definitions.  Made minor modifications,
- * added plx9080 registers and
- * stripped out stuff that was specifically for the wanXL driver.
- * Note: I've only made sure the definitions are correct as far
- * as I make use of them.  There are still various plx9060-isms
- * left in this header file.
- *
  ********************************************************************
  *
  * Copyright (C) 1999 RG Studio s.c.
 #ifndef __COMEDI_PLX9080_H
 #define __COMEDI_PLX9080_H
 
-/*  descriptor block used for chained dma transfers */
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+/**
+ * struct plx_dma_desc - DMA descriptor format for PLX PCI 9080
+ * @pci_start_addr:    PCI Bus address for transfer (DMAPADR).
+ * @local_start_addr:  Local Bus address for transfer (DMALADR).
+ * @transfer_size:     Transfer size in bytes (max 8 MiB) (DMASIZ).
+ * @next:              Address of next descriptor + flags (DMADPR).
+ *
+ * Describes the format of a scatter-gather DMA descriptor for the PLX
+ * PCI 9080.  All members are raw, little-endian register values that
+ * will be transferred by the DMA engine from local or PCI memory into
+ * corresponding registers for the DMA channel.
+ *
+ * The DMA descriptors must be aligned on a 16-byte boundary.  Bits 3:0
+ * of @next contain flags describing the address space of the next
+ * descriptor (local or PCI), an "end of chain" marker, an "interrupt on
+ * terminal count" bit, and a data transfer direction.
+ */
 struct plx_dma_desc {
        __le32 pci_start_addr;
        __le32 local_start_addr;
-       /* transfer_size is in bytes, only first 23 bits of register are used */
        __le32 transfer_size;
-       /*
-        * address of next descriptor (quad word aligned), plus some
-        * additional bits (see PLX_DMA0_DESCRIPTOR_REG)
-        */
        __le32 next;
 };
 
-/**********************************************************************
-**            Register Offsets and Bit Definitions
-**
-** Note: All offsets zero relative.  IE. Some standard base address
-** must be added to the Register Number to properly access the register.
-**
-**********************************************************************/
-
-/* L, Local Addr Space 0 Range Register */
-#define PLX_LAS0RNG_REG         0x0000
-/* L, Local Addr Space 1 Range Register */
-#define PLX_LAS1RNG_REG         0x00f0
-#define  LRNG_IO           0x00000001  /* Map to: 1=I/O, 0=Mem */
-#define  LRNG_ANY32        0x00000000  /* Locate anywhere in 32 bit */
-#define  LRNG_LT1MB        0x00000002  /* Locate in 1st meg */
-#define  LRNG_ANY64        0x00000004  /* Locate anywhere in 64 bit */
-/*  bits that specify range for memory io */
-#define  LRNG_MEM_MASK     0xfffffff0
-/*  bits that specify range for normal io */
-#define  LRNG_IO_MASK     0xfffffffa
-/* L, Local Addr Space 0 Remap Register */
-#define PLX_LAS0MAP_REG         0x0004
-/* L, Local Addr Space 1 Remap Register */
-#define PLX_LAS1MAP_REG         0x00f4
-#define  LMAP_EN           0x00000001  /* Enable slave decode */
-/*  bits that specify decode for memory io */
-#define  LMAP_MEM_MASK     0xfffffff0
-/*  bits that specify decode bits for normal io */
-#define  LMAP_IO_MASK     0xfffffffa
-
 /*
- * Mode/Arbitration Register.
+ * Register Offsets and Bit Definitions
  */
-#define PLX_MARB_REG         0x8       /* L, Local Arbitration Register */
-#define PLX_DMAARB_REG      0xac
-enum marb_bits {
-       MARB_LLT_MASK = 0x000000ff,     /* Local Bus Latency Timer */
-       MARB_LPT_MASK = 0x0000ff00,     /* Local Bus Pause Timer */
-       MARB_LTEN = 0x00010000, /* Latency Timer Enable */
-       MARB_LPEN = 0x00020000, /* Pause Timer Enable */
-       MARB_BREQ = 0x00040000, /* Local Bus BREQ Enable */
-       MARB_DMA_PRIORITY_MASK = 0x00180000,
-       /* local bus direct slave give up bus mode */
-       MARB_LBDS_GIVE_UP_BUS_MODE = 0x00200000,
-       /* direct slave LLOCKo# enable */
-       MARB_DS_LLOCK_ENABLE = 0x00400000,
-       MARB_PCI_REQUEST_MODE = 0x00800000,
-       MARB_PCIV21_MODE = 0x01000000,  /* pci specification v2.1 mode */
-       MARB_PCI_READ_NO_WRITE_MODE = 0x02000000,
-       MARB_PCI_READ_WITH_WRITE_FLUSH_MODE = 0x04000000,
-       /* gate local bus latency timer with BREQ */
-       MARB_GATE_TIMER_WITH_BREQ = 0x08000000,
-       MARB_PCI_READ_NO_FLUSH_MODE = 0x10000000,
-       MARB_USE_SUBSYSTEM_IDS = 0x20000000,
-};
-
-#define PLX_BIGEND_REG 0xc
-enum bigend_bits {
-       /* use big endian ordering for configuration register accesses */
-       BIGEND_CONFIG = 0x1,
-       BIGEND_DIRECT_MASTER = 0x2,
-       BIGEND_DIRECT_SLAVE_LOCAL0 = 0x4,
-       BIGEND_ROM = 0x8,
-       /*
-        * use byte lane consisting of most significant bits instead of
-        * least significant
-        */
-       BIGEND_BYTE_LANE = 0x10,
-       BIGEND_DIRECT_SLAVE_LOCAL1 = 0x20,
-       BIGEND_DMA1 = 0x40,
-       BIGEND_DMA0 = 0x80,
-};
 
+/* Local Address Space 0 Range Register */
+#define PLX_REG_LAS0RR         0x0000
+/* Local Address Space 1 Range Register */
+#define PLX_REG_LAS1RR         0x00f0
+
+#define PLX_LASRR_IO           BIT(0)          /* Map to: 1=I/O, 0=Mem */
+#define PLX_LASRR_ANY32                (BIT(1) * 0)    /* Locate anywhere in 32 bit */
+#define PLX_LASRR_LT1MB                (BIT(1) * 1)    /* Locate in 1st meg */
+#define PLX_LASRR_ANY64                (BIT(1) * 2)    /* Locate anywhere in 64 bit */
+#define PLX_LASRR_MLOC_MASK    GENMASK(2, 1)   /* Memory location bits */
+#define PLX_LASRR_PREFETCH     BIT(3)          /* Memory is prefetchable */
+/* bits that specify range for memory space decode bits */
+#define PLX_LASRR_MEM_MASK     GENMASK(31, 4)
+/* bits that specify range for i/o space decode bits */
+#define PLX_LASRR_IO_MASK      GENMASK(31, 2)
+
+/* Local Address Space 0 Local Base Address (Remap) Register */
+#define PLX_REG_LAS0BA         0x0004
+/* Local Address Space 1 Local Base Address (Remap) Register */
+#define PLX_REG_LAS1BA         0x00f4
+
+#define PLX_LASBA_EN           BIT(0)          /* Enable slave decode */
+/* bits that specify local base address for memory space */
+#define PLX_LASBA_MEM_MASK     GENMASK(31, 4)
+/* bits that specify local base address for i/o space */
+#define PLX_LASBA_IO_MASK      GENMASK(31, 2)
+
+/* Mode/Arbitration Register */
+#define PLX_REG_MARBR          0x0008
+/* DMA Arbitration Register (alias of MARBR). */
+#define PLX_REG_DMAARB         0x00ac
+
+/* Local Bus Latency Timer */
+#define PLX_MARBR_LT(x)                (BIT(0) * ((x) & 0xff))
+#define PLX_MARBR_LT_MASK      GENMASK(7, 0)
+#define PLX_MARBR_LT_SHIFT     0
+/* Local Bus Pause Timer */
+#define PLX_MARBR_PT(x)                (BIT(8) * ((x) & 0xff))
+#define PLX_MARBR_PT_MASK      GENMASK(15, 8)
+#define PLX_MARBR_PT_SHIFT     8
+/* Local Bus Latency Timer Enable */
+#define PLX_MARBR_LTEN         BIT(16)
+/* Local Bus Pause Timer Enable */
+#define PLX_MARBR_PTEN         BIT(17)
+/* Local Bus BREQ Enable */
+#define PLX_MARBR_BREQEN       BIT(18)
+/* DMA Channel Priority */
+#define PLX_MARBR_PRIO_ROT     (BIT(19) * 0)   /* Rotational priority */
+#define PLX_MARBR_PRIO_DMA0    (BIT(19) * 1)   /* DMA channel 0 has priority */
+#define PLX_MARBR_PRIO_DMA1    (BIT(19) * 2)   /* DMA channel 1 has priority */
+#define PLX_MARBR_PRIO_MASK    GENMASK(20, 19)
+/* Local Bus Direct Slave Give Up Bus Mode */
+#define PLX_MARBR_DSGUBM       BIT(21)
+/* Direct Slave LLOCKo# Enable */
+#define PLX_MARBR_DSLLOCKOEN   BIT(22)
+/* PCI Request Mode */
+#define PLX_MARBR_PCIREQM      BIT(23)
+/* PCI Specification v2.1 Mode */
+#define PLX_MARBR_PCIV21M      BIT(24)
+/* PCI Read No Write Mode */
+#define PLX_MARBR_PCIRNWM      BIT(25)
+/* PCI Read with Write Flush Mode */
+#define PLX_MARBR_PCIRWFM      BIT(26)
+/* Gate Local Bus Latency Timer with BREQ */
+#define PLX_MARBR_GLTBREQ      BIT(27)
+/* PCI Read No Flush Mode */
+#define PLX_MARBR_PCIRNFM      BIT(28)
 /*
-** Note: The Expansion ROM  stuff is only relevant to the PC environment.
-**       This expansion ROM code is executed by the host CPU at boot time.
-**       For this reason no bit definitions are provided here.
+ * Make reads from PCI Configuration register 0 return Subsystem ID and
+ * Subsystem Vendor ID instead of Device ID and Vendor ID
  */
-#define PLX_ROMRNG_REG         0x0010  /* L, Expn ROM Space Range Register */
-/* L, Local Addr Space Range Register */
-#define PLX_ROMMAP_REG         0x0014
-
-#define PLX_REGION0_REG         0x0018 /* L, Local Bus Region 0 Descriptor */
-#define  RGN_WIDTH         0x00000002  /* Local bus width bits */
-#define  RGN_8BITS         0x00000000  /* 08 bit Local Bus */
-#define  RGN_16BITS        0x00000001  /* 16 bit Local Bus */
-#define  RGN_32BITS        0x00000002  /* 32 bit Local Bus */
-#define  RGN_MWS           0x0000003C  /* Memory Access Wait States */
-#define  RGN_0MWS          0x00000000
-#define  RGN_1MWS          0x00000004
-#define  RGN_2MWS          0x00000008
-#define  RGN_3MWS          0x0000000C
-#define  RGN_4MWS          0x00000010
-#define  RGN_6MWS          0x00000018
-#define  RGN_8MWS          0x00000020
-#define  RGN_MRE           0x00000040  /* Memory Space Ready Input Enable */
-#define  RGN_MBE           0x00000080  /* Memory Space Bterm Input Enable */
-#define  RGN_READ_PREFETCH_DISABLE 0x00000100
-#define  RGN_ROM_PREFETCH_DISABLE 0x00000200
-#define  RGN_READ_PREFETCH_COUNT_ENABLE 0x00000400
-#define  RGN_RWS           0x003C0000  /* Expn ROM Wait States */
-#define  RGN_RRE           0x00400000  /* ROM Space Ready Input Enable */
-#define  RGN_RBE           0x00800000  /* ROM Space Bterm Input Enable */
-#define  RGN_MBEN          0x01000000  /* Memory Space Burst Enable */
-#define  RGN_RBEN          0x04000000  /* ROM Space Burst Enable */
-#define  RGN_THROT         0x08000000  /* De-assert TRDY when FIFO full */
-#define  RGN_TRD           0xF0000000  /* Target Ready Delay /8 */
-
-#define PLX_REGION1_REG         0x00f8 /* L, Local Bus Region 1 Descriptor */
-
-#define PLX_DMRNG_REG          0x001C  /* L, Direct Master Range Register */
-
-#define PLX_LBAPMEM_REG        0x0020  /* L, Lcl Base Addr for PCI mem space */
-
-#define PLX_LBAPIO_REG         0x0024  /* L, Lcl Base Addr for PCI I/O space */
-
-#define PLX_DMMAP_REG          0x0028  /* L, Direct Master Remap Register */
-#define  DMM_MAE           0x00000001  /* Direct Mstr Memory Acc Enable */
-#define  DMM_IAE           0x00000002  /* Direct Mstr I/O Acc Enable */
-#define  DMM_LCK           0x00000004  /* LOCK Input Enable */
-#define  DMM_PF4           0x00000008  /* Prefetch 4 Mode Enable */
-#define  DMM_THROT         0x00000010  /* Assert IRDY when read FIFO full */
-#define  DMM_PAF0          0x00000000  /* Programmable Almost fill level */
-#define  DMM_PAF1          0x00000020  /* Programmable Almost fill level */
-#define  DMM_PAF2          0x00000040  /* Programmable Almost fill level */
-#define  DMM_PAF3          0x00000060  /* Programmable Almost fill level */
-#define  DMM_PAF4          0x00000080  /* Programmable Almost fill level */
-#define  DMM_PAF5          0x000000A0  /* Programmable Almost fill level */
-#define  DMM_PAF6          0x000000C0  /* Programmable Almost fill level */
-#define  DMM_PAF7          0x000000D0  /* Programmable Almost fill level */
-#define  DMM_MAP           0xFFFF0000  /* Remap Address Bits */
-
-#define PLX_CAR_REG            0x002C  /* L, Configuration Address Register */
-#define  CAR_CT0           0x00000000  /* Config Type 0 */
-#define  CAR_CT1           0x00000001  /* Config Type 1 */
-#define  CAR_REG           0x000000FC  /* Register Number Bits */
-#define  CAR_FUN           0x00000700  /* Function Number Bits */
-#define  CAR_DEV           0x0000F800  /* Device Number Bits */
-#define  CAR_BUS           0x00FF0000  /* Bus Number Bits */
-#define  CAR_CFG           0x80000000  /* Config Spc Access Enable */
-
-#define PLX_DBR_IN_REG         0x0060  /* L, PCI to Local Doorbell Register */
-
-#define PLX_DBR_OUT_REG        0x0064  /* L, Local to PCI Doorbell Register */
-
-#define PLX_INTRCS_REG         0x0068  /* L, Interrupt Control/Status Reg */
-#define  ICS_AERR          0x00000001  /* Assert LSERR on ABORT */
-#define  ICS_PERR          0x00000002  /* Assert LSERR on Parity Error */
-#define  ICS_SERR          0x00000004  /* Generate PCI SERR# */
-#define  ICS_MBIE          0x00000008  /*  mailbox interrupt enable */
-#define  ICS_PIE           0x00000100  /* PCI Interrupt Enable */
-#define  ICS_PDIE          0x00000200  /* PCI Doorbell Interrupt Enable */
-#define  ICS_PAIE          0x00000400  /* PCI Abort Interrupt Enable */
-#define  ICS_PLIE          0x00000800  /* PCI Local Int Enable */
-#define  ICS_RAE           0x00001000  /* Retry Abort Enable */
-#define  ICS_PDIA          0x00002000  /* PCI Doorbell Interrupt Active */
-#define  ICS_PAIA          0x00004000  /* PCI Abort Interrupt Active */
-#define  ICS_LIA           0x00008000  /* Local Interrupt Active */
-#define  ICS_LIE           0x00010000  /* Local Interrupt Enable */
-#define  ICS_LDIE          0x00020000  /* Local Doorbell Int Enable */
-#define  ICS_DMA0_E        0x00040000  /* DMA #0 Interrupt Enable */
-#define  ICS_DMA1_E        0x00080000  /* DMA #1 Interrupt Enable */
-#define  ICS_LDIA          0x00100000  /* Local Doorbell Int Active */
-#define  ICS_DMA0_A        0x00200000  /* DMA #0 Interrupt Active */
-#define  ICS_DMA1_A        0x00400000  /* DMA #1 Interrupt Active */
-#define  ICS_BIA           0x00800000  /* BIST Interrupt Active */
-#define  ICS_TA_DM         0x01000000  /* Target Abort - Direct Master */
-#define  ICS_TA_DMA0       0x02000000  /* Target Abort - DMA #0 */
-#define  ICS_TA_DMA1       0x04000000  /* Target Abort - DMA #1 */
-#define  ICS_TA_RA         0x08000000  /* Target Abort - Retry Timeout */
-/*  mailbox x is active */
-#define  ICS_MBIA(x)       (0x10000000 << ((x) & 0x3))
-
-#define PLX_CONTROL_REG        0x006C  /* L, EEPROM Cntl & PCI Cmd Codes */
-#define  CTL_RDMA          0x0000000E  /* DMA Read Command */
-#define  CTL_WDMA          0x00000070  /* DMA Write Command */
-#define  CTL_RMEM          0x00000600  /* Memory Read Command */
-#define  CTL_WMEM          0x00007000  /* Memory Write Command */
-#define  CTL_USERO         0x00010000  /* USERO output pin control bit */
-#define  CTL_USERI         0x00020000  /* USERI input pin bit */
-#define  CTL_EE_CLK        0x01000000  /* EEPROM Clock line */
-#define  CTL_EE_CS         0x02000000  /* EEPROM Chip Select */
-#define  CTL_EE_W          0x04000000  /* EEPROM Write bit */
-#define  CTL_EE_R          0x08000000  /* EEPROM Read bit */
-#define  CTL_EECHK         0x10000000  /* EEPROM Present bit */
-#define  CTL_EERLD         0x20000000  /* EEPROM Reload Register */
-#define  CTL_RESET         0x40000000  /* !! Adapter Reset !! */
-#define  CTL_READY         0x80000000  /* Local Init Done */
-
-#define PLX_ID_REG     0x70    /*  hard-coded plx vendor and device ids */
-
-#define PLX_REVISION_REG       0x74    /*  silicon revision */
-
-#define PLX_DMA0_MODE_REG      0x80    /*  dma channel 0 mode register */
-#define PLX_DMA1_MODE_REG      0x94    /*  dma channel 0 mode register */
-#define  PLX_LOCAL_BUS_16_WIDE_BITS    0x1
-#define  PLX_LOCAL_BUS_32_WIDE_BITS    0x3
-#define  PLX_LOCAL_BUS_WIDTH_MASK      0x3
-#define  PLX_DMA_EN_READYIN_BIT        0x40    /*  enable ready in input */
-#define  PLX_EN_BTERM_BIT      0x80    /*  enable BTERM# input */
-#define  PLX_DMA_LOCAL_BURST_EN_BIT    0x100   /*  enable local burst mode */
-#define  PLX_EN_CHAIN_BIT      0x200   /*  enables chaining */
-/*  enables interrupt on dma done */
-#define  PLX_EN_DMA_DONE_INTR_BIT      0x400
-/*  hold local address constant (don't increment) */
-#define  PLX_LOCAL_ADDR_CONST_BIT      0x800
-/*  enables demand-mode for dma transfer */
-#define  PLX_DEMAND_MODE_BIT   0x1000
-#define  PLX_EOT_ENABLE_BIT    0x4000
-#define  PLX_STOP_MODE_BIT 0x8000
-/*  routes dma interrupt to pci bus (instead of local bus) */
-#define  PLX_DMA_INTR_PCI_BIT  0x20000
-
-/*  pci address that dma transfers start at */
-#define PLX_DMA0_PCI_ADDRESS_REG       0x84
-#define PLX_DMA1_PCI_ADDRESS_REG       0x98
-
-/*  local address that dma transfers start at */
-#define PLX_DMA0_LOCAL_ADDRESS_REG     0x88
-#define PLX_DMA1_LOCAL_ADDRESS_REG     0x9c
-
-/*  number of bytes to transfer (first 23 bits) */
-#define PLX_DMA0_TRANSFER_SIZE_REG     0x8c
-#define PLX_DMA1_TRANSFER_SIZE_REG     0xa0
-
-#define PLX_DMA0_DESCRIPTOR_REG        0x90    /*  descriptor pointer register */
-#define PLX_DMA1_DESCRIPTOR_REG        0xa4
-/*  descriptor is located in pci space (not local space) */
-#define  PLX_DESC_IN_PCI_BIT   0x1
-#define  PLX_END_OF_CHAIN_BIT  0x2     /*  end of chain bit */
-/*  interrupt when this descriptor's transfer is finished */
-#define  PLX_INTR_TERM_COUNT   0x4
-/*  transfer from local to pci bus (not pci to local) */
-#define  PLX_XFER_LOCAL_TO_PCI 0x8
-
-#define PLX_DMA0_CS_REG        0xa8    /*  command status register */
-#define PLX_DMA1_CS_REG        0xa9
-#define  PLX_DMA_EN_BIT        0x1     /*  enable dma channel */
-#define  PLX_DMA_START_BIT     0x2     /*  start dma transfer */
-#define  PLX_DMA_ABORT_BIT     0x4     /*  abort dma transfer */
-#define  PLX_CLEAR_DMA_INTR_BIT        0x8     /*  clear dma interrupt */
-#define  PLX_DMA_DONE_BIT      0x10    /*  transfer done status bit */
-
-#define PLX_DMA0_THRESHOLD_REG 0xb0    /*  command status register */
+#define PLX_MARBR_SUBSYSIDS    BIT(29)
+
+/* Big/Little Endian Descriptor Register */
+#define PLX_REG_BIGEND         0x000c
+
+/* Configuration Register Big Endian Mode */
+#define PLX_BIGEND_CONFIG      BIT(0)
+/* Direct Master Big Endian Mode */
+#define PLX_BIGEND_DM          BIT(1)
+/* Direct Slave Address Space 0 Big Endian Mode */
+#define PLX_BIGEND_DSAS0       BIT(2)
+/* Direct Slave Expansion ROM Big Endian Mode */
+#define PLX_BIGEND_EROM                BIT(3)
+/* Big Endian Byte Lane Mode - use most significant byte lanes */
+#define PLX_BIGEND_BEBLM       BIT(4)
+/* Direct Slave Address Space 1 Big Endian Mode */
+#define PLX_BIGEND_DSAS1       BIT(5)
+/* DMA Channel 1 Big Endian Mode */
+#define PLX_BIGEND_DMA1                BIT(6)
+/* DMA Channel 0 Big Endian Mode */
+#define PLX_BIGEND_DMA0                BIT(7)
+/* DMA Channel N Big Endian Mode (N <= 1) */
+#define PLX_BIGEND_DMA(n)      ((n) ? PLX_BIGEND_DMA1 : PLX_BIGEND_DMA0)
 
 /*
- * Accesses near the end of memory can cause the PLX chip
- * to pre-fetch data off of end-of-ram.  Limit the size of
- * memory so host-side accesses cannot occur.
+ * Note: The Expansion ROM  stuff is only relevant to the PC environment.
+ *       This expansion ROM code is executed by the host CPU at boot time.
+ *       For this reason no bit definitions are provided here.
  */
 
-#define PLX_PREFETCH   32
+/* Expansion ROM Range Register */
+#define PLX_REG_EROMRR         0x0010
+/* Expansion ROM Local Base Address (Remap) Register */
+#define PLX_REG_EROMBA         0x0014
+
+/* Local Address Space 0/Expansion ROM Bus Region Descriptor Register */
+#define PLX_REG_LBRD0          0x0018
+/* Local Address Space 1 Bus Region Descriptor Register */
+#define PLX_REG_LBRD1          0x00f8
+
+/* Memory Space Local Bus Width */
+#define PLX_LBRD_MSWIDTH8      (BIT(0) * 0)    /* 8 bits wide */
+#define PLX_LBRD_MSWIDTH16     (BIT(0) * 1)    /* 16 bits wide */
+#define PLX_LBRD_MSWIDTH32     (BIT(0) * 2)    /* 32 bits wide */
+#define PLX_LBRD_MSWIDTH32A    (BIT(0) * 3)    /* 32 bits wide */
+#define PLX_LBRD_MSWIDTH_MASK  GENMASK(1, 0)
+#define PLX_LBRD_MSWIDTH_SHIFT 0
+/* Memory Space Internal Wait States */
+#define PLX_LBRD_MSIWS(x)      (BIT(2) * ((x) & 0xf))
+#define PLX_LBRD_MSIWS_MASK    GENMASK(5, 2)
+#define PLX_LBRD_MSIWS_SHIFT   2
+/* Memory Space Ready Input Enable */
+#define PLX_LBRD_MSREADYIEN    BIT(6)
+/* Memory Space BTERM# Input Enable */
+#define PLX_LBRD_MSBTERMIEN    BIT(7)
+/* Memory Space 0 Prefetch Disable (LBRD0 only) */
+#define PLX_LBRD0_MSPREDIS     BIT(8)
+/* Memory Space 1 Burst Enable (LBRD1 only) */
+#define PLX_LBRD1_MSBURSTEN    BIT(8)
+/* Expansion ROM Space Prefetch Disable (LBRD0 only) */
+#define PLX_LBRD0_EROMPREDIS   BIT(9)
+/* Memory Space 1 Prefetch Disable (LBRD1 only) */
+#define PLX_LBRD1_MSPREDIS     BIT(9)
+/* Read Prefetch Count Enable */
+#define PLX_LBRD_RPFCOUNTEN    BIT(10)
+/* Prefetch Counter */
+#define PLX_LBRD_PFCOUNT(x)    (BIT(11) * ((x) & 0xf))
+#define PLX_LBRD_PFCOUNT_MASK  GENMASK(14, 11)
+#define PLX_LBRD_PFCOUNT_SHIFT 11
+/* Expansion ROM Space Local Bus Width (LBRD0 only) */
+#define PLX_LBRD0_EROMWIDTH8   (BIT(16) * 0)   /* 8 bits wide */
+#define PLX_LBRD0_EROMWIDTH16  (BIT(16) * 1)   /* 16 bits wide */
+#define PLX_LBRD0_EROMWIDTH32  (BIT(16) * 2)   /* 32 bits wide */
+#define PLX_LBRD0_EROMWIDTH32A (BIT(16) * 3)   /* 32 bits wide */
+#define PLX_LBRD0_EROMWIDTH_MASK       GENMASK(17, 16)
+#define PLX_LBRD0_EROMWIDTH_SHIFT      16
+/* Expansion ROM Space Internal Wait States (LBRD0 only) */
+#define PLX_LBRD0_EROMIWS(x)   (BIT(18) * ((x) & 0xf))
+#define PLX_LBRD0_EROMIWS_MASK GENMASK(21, 18)
+#define PLX_LBRD0_EROMIWS_SHIFT        18
+/* Expansion ROM Space Ready Input Enable (LBRD0 only) */
+#define PLX_LBRD0_EROMREADYIEN BIT(22)
+/* Expansion ROM Space BTERM# Input Enable (LBRD0 only) */
+#define PLX_LBRD0_EROMBTERMIEN BIT(23)
+/* Memory Space 0 Burst Enable (LBRD0 only) */
+#define PLX_LBRD0_MSBURSTEN    BIT(24)
+/* Extra Long Load From Serial EEPROM  (LBRD0 only) */
+#define PLX_LBRD0_EELONGLOAD   BIT(25)
+/* Expansion ROM Space Burst Enable (LBRD0 only) */
+#define PLX_LBRD0_EROMBURSTEN  BIT(26)
+/* Direct Slave PCI Write Mode - assert TRDY# when FIFO full (LBRD0 only) */
+#define PLX_LBRD0_DSWMTRDY     BIT(27)
+/* PCI Target Retry Delay Clocks / 8 (LBRD0 only) */
+#define PLX_LBRD0_TRDELAY(x)   (BIT(28) * ((x) & 0xF))
+#define PLX_LBRD0_TRDELAY_MASK GENMASK(31, 28)
+#define PLX_LBRD0_TRDELAY_SHIFT        28
+
+/* Local Range Register for Direct Master to PCI */
+#define PLX_REG_DMRR           0x001c
+
+/* Local Bus Base Address Register for Direct Master to PCI Memory */
+#define PLX_REG_DMLBAM         0x0020
+
+/* Local Base Address Register for Direct Master to PCI IO/CFG */
+#define PLX_REG_DMLBAI         0x0024
+
+/* PCI Base Address (Remap) Register for Direct Master to PCI Memory */
+#define PLX_REG_DMPBAM         0x0028
+
+/* Direct Master Memory Access Enable */
+#define PLX_DMPBAM_MEMACCEN    BIT(0)
+/* Direct Master I/O Access Enable */
+#define PLX_DMPBAM_IOACCEN     BIT(1)
+/* LLOCK# Input Enable */
+#define PLX_DMPBAM_LLOCKIEN    BIT(2)
+/* Direct Master Read Prefetch Size Control (bits 12, 3) */
+#define PLX_DMPBAM_RPSIZECONT  ((BIT(12) * 0) | (BIT(3) * 0))
+#define PLX_DMPBAM_RPSIZE4     ((BIT(12) * 0) | (BIT(3) * 1))
+#define PLX_DMPBAM_RPSIZE8     ((BIT(12) * 1) | (BIT(3) * 0))
+#define PLX_DMPBAM_RPSIZE16    ((BIT(12) * 1) | (BIT(3) * 1))
+#define PLX_DMPBAM_RPSIZE_MASK (BIT(12) | BIT(3))
+/* Direct Master PCI Read Mode - deassert IRDY when FIFO full */
+#define PLX_DMPBAM_RMIRDY      BIT(4)
+/* Programmable Almost Full Level (bits 10, 8:5) */
+#define PLX_DMPBAM_PAFL(x)     ((BIT(10) * !!((x) & 0x10)) | \
+                                (BIT(5) * ((x) & 0xf)))
+#define PLX_DMPBAM_TO_PAFL(v)  ((((BIT(10) & (v)) >> 1) | \
+                                 (GENMASK(8, 5) & (v))) >> 5)
+#define PLX_DMPBAM_PAFL_MASK   (BIT(10) | GENMASK(8, 5))
+/* Write And Invalidate Mode */
+#define PLX_DMPBAM_WIM         BIT(9)
+/* Direct Master Prefetch Limit */
+#define PLX_DBPBAM_PFLIMIT     BIT(11)
+/* I/O Remap Select */
+#define PLX_DMPBAM_IOREMAPSEL  BIT(13)
+/* Direct Master Write Delay */
+#define PLX_DMPBAM_WDELAYNONE  (BIT(14) * 0)
+#define PLX_DMPBAM_WDELAY4     (BIT(14) * 1)
+#define PLX_DMPBAM_WDELAY8     (BIT(14) * 2)
+#define PLX_DMPBAM_WDELAY16    (BIT(14) * 3)
+#define PLX_DMPBAM_WDELAY_MASK GENMASK(15, 14)
+/* Remap of Local-to-PCI Space Into PCI Address Space */
+#define PLX_DMPBAM_REMAP_MASK  GENMASK(31, 16)
+
+/* PCI Configuration Address Register for Direct Master to PCI IO/CFG */
+#define PLX_REG_DMCFGA         0x002c
+
+/* Configuration Type */
+#define PLX_DMCFGA_TYPE0       (BIT(0) * 0)
+#define PLX_DMCFGA_TYPE1       (BIT(0) * 1)
+#define PLX_DMCFGA_TYPE_MASK   GENMASK(1, 0)
+/* Register Number */
+#define PLX_DMCFGA_REGNUM(x)   (BIT(2) * ((x) & 0x3f))
+#define PLX_DMCFGA_REGNUM_MASK GENMASK(7, 2)
+#define PLX_DMCFGA_REGNUM_SHIFT        2
+/* Function Number */
+#define PLX_DMCFGA_FUNCNUM(x)  (BIT(8) * ((x) & 0x7))
+#define PLX_DMCFGA_FUNCNUM_MASK        GENMASK(10, 8)
+#define PLX_DMCFGA_FUNCNUM_SHIFT 8
+/* Device Number */
+#define PLX_DMCFGA_DEVNUM(x)   (BIT(11) * ((x) & 0x1f))
+#define PLX_DMCFGA_DEVNUM_MASK GENMASK(15, 11)
+#define PLX_DMCFGA_DEVNUM_SHIFT        11
+/* Bus Number */
+#define PLX_DMCFGA_BUSNUM(x)   (BIT(16) * ((x) & 0xff))
+#define PLX_DMCFGA_BUSNUM_MASK GENMASK(23, 16)
+#define PLX_DMCFGA_BUSNUM_SHIFT        16
+/* Configuration Enable */
+#define PLX_DMCFGA_CONFIGEN    BIT(31)
 
 /*
- * The PCI Interface, via the PCI-9060 Chip, has up to eight (8) Mailbox
- * Registers.  The PUTS (Power-Up Test Suite) handles the board-side
- * interface/interaction using the first 4 registers.  Specifications for
- * the use of the full PUTS' command and status interface is contained
- * within a separate SBE PUTS Manual.  The Host-Side Device Driver only
- * uses a subset of the full PUTS interface.
+ * Mailbox Register N (N <= 7)
+ *
+ * Note that if the I2O feature is enabled (QSR[0] is set), Mailbox Register 0
+ * is replaced by the Inbound Queue Port, and Mailbox Register 1 is replaced
+ * by the Outbound Queue Port.  However, Mailbox Register 0 and 1 are always
+ * accessible at alternative offsets if the I2O feature is enabled.
  */
+#define PLX_REG_MBOX(n)                (0x0040 + (n) * 4)
+#define PLX_REG_MBOX0          PLX_REG_MBOX(0)
+#define PLX_REG_MBOX1          PLX_REG_MBOX(1)
+#define PLX_REG_MBOX2          PLX_REG_MBOX(2)
+#define PLX_REG_MBOX3          PLX_REG_MBOX(3)
+#define PLX_REG_MBOX4          PLX_REG_MBOX(4)
+#define PLX_REG_MBOX5          PLX_REG_MBOX(5)
+#define PLX_REG_MBOX6          PLX_REG_MBOX(6)
+#define PLX_REG_MBOX7          PLX_REG_MBOX(7)
+
+/* Alternative offsets for Mailbox Registers 0 and 1 (in case I2O is enabled) */
+#define PLX_REG_ALT_MBOX(n)    ((n) < 2 ? 0x0078 + (n) * 4 : PLX_REG_MBOX(n))
+#define PLX_REG_ALT_MBOX0      PLX_REG_ALT_MBOX(0)
+#define PLX_REG_ALT_MBOX1      PLX_REG_ALT_MBOX(1)
+
+/* PCI-to-Local Doorbell Register */
+#define PLX_REG_P2LDBELL       0x0060
+
+/* Local-to-PCI Doorbell Register */
+#define PLX_REG_L2PDBELL       0x0064
+
+/* Interrupt Control/Status Register */
+#define PLX_REG_INTCSR         0x0068
+
+/* Enable Local Bus LSERR# when PCI Bus Target Abort or Master Abort occurs */
+#define PLX_INTCSR_LSEABORTEN  BIT(0)
+/* Enable Local Bus LSERR# when PCI parity error occurs */
+#define PLX_INTCSR_LSEPARITYEN BIT(1)
+/* Generate PCI Bus SERR# when set to 1 */
+#define PLX_INTCSR_GENSERR     BIT(2)
+/* Mailbox Interrupt Enable (local bus interrupts on PCI write to MBOX0-3) */
+#define PLX_INTCSR_MBIEN       BIT(3)
+/* PCI Interrupt Enable */
+#define PLX_INTCSR_PIEN                BIT(8)
+/* PCI Doorbell Interrupt Enable */
+#define PLX_INTCSR_PDBIEN      BIT(9)
+/* PCI Abort Interrupt Enable */
+#define PLX_INTCSR_PABORTIEN   BIT(10)
+/* PCI Local Interrupt Enable */
+#define PLX_INTCSR_PLIEN       BIT(11)
+/* Retry Abort Enable (for diagnostic purposes only) */
+#define PLX_INTCSR_RAEN                BIT(12)
+/* PCI Doorbell Interrupt Active (read-only) */
+#define PLX_INTCSR_PDBIA       BIT(13)
+/* PCI Abort Interrupt Active (read-only) */
+#define PLX_INTCSR_PABORTIA    BIT(14)
+/* Local Interrupt (LINTi#) Active (read-only) */
+#define PLX_INTCSR_PLIA                BIT(15)
+/* Local Interrupt Output (LINTo#) Enable */
+#define PLX_INTCSR_LIOEN       BIT(16)
+/* Local Doorbell Interrupt Enable */
+#define PLX_INTCSR_LDBIEN      BIT(17)
+/* DMA Channel 0 Interrupt Enable */
+#define PLX_INTCSR_DMA0IEN     BIT(18)
+/* DMA Channel 1 Interrupt Enable */
+#define PLX_INTCSR_DMA1IEN     BIT(19)
+/* DMA Channel N Interrupt Enable (N <= 1) */
+#define PLX_INTCSR_DMAIEN(n)   ((n) ? PLX_INTCSR_DMA1IEN : PLX_INTCSR_DMA0IEN)
+/* Local Doorbell Interrupt Active (read-only) */
+#define PLX_INTCSR_LDBIA       BIT(20)
+/* DMA Channel 0 Interrupt Active (read-only) */
+#define PLX_INTCSR_DMA0IA      BIT(21)
+/* DMA Channel 1 Interrupt Active (read-only) */
+#define PLX_INTCSR_DMA1IA      BIT(22)
+/* DMA Channel N Interrupt Active (N <= 1) (read-only) */
+#define PLX_INTCSR_DMAIA(n)    ((n) ? PLX_INTCSR_DMA1IA : PLX_INTCSR_DMA0IA)
+/* BIST Interrupt Active (read-only) */
+#define PLX_INTCSR_BISTIA      BIT(23)
+/* Direct Master Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDM     BIT(24)
+/* DMA Channel 0 Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDMA0   BIT(25)
+/* DMA Channel 1 Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDMA1   BIT(26)
+/* DMA Channel N Not Bus Master During Master Or Target Abort (read-only) */
+#define PLX_INTCSR_ABNOTDMA(n) ((n) ? PLX_INTCSR_ABNOTDMA1 \
+                                    : PLX_INTCSR_ABNOTDMA0)
+/* Target Abort Not Generated After 256 Master Retries (read-only) */
+#define PLX_INTCSR_ABNOTRETRY  BIT(27)
+/* PCI Wrote Mailbox 0 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB0IA       BIT(28)
+/* PCI Wrote Mailbox 1 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB1IA       BIT(29)
+/* PCI Wrote Mailbox 2 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB2IA       BIT(30)
+/* PCI Wrote Mailbox 3 (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MB3IA       BIT(31)
+/* PCI Wrote Mailbox N (N <= 3) (enabled if bit 3 set) (read-only) */
+#define PLX_INTCSR_MBIA(n)     BIT(28 + (n))
 
-/*****************************************/
-/***    MAILBOX #(-1) - MEM ACCESS STS ***/
-/*****************************************/
-
-#define MBX_STS_VALID      0x57584744  /* 'WXGD' */
-#define MBX_STS_DILAV      0x44475857  /* swapped = 'DGXW' */
-
-/*****************************************/
-/***    MAILBOX #0  -  PUTS STATUS     ***/
-/*****************************************/
-
-#define MBX_STS_MASK       0x000000ff  /* PUTS Status Register bits */
-#define MBX_STS_TMASK      0x0000000f  /* register bits for TEST number */
-
-#define MBX_STS_PCIRESET   0x00000100  /* Host issued PCI reset request */
-#define MBX_STS_BUSY       0x00000080  /* PUTS is in progress */
-#define MBX_STS_ERROR      0x00000040  /* PUTS has failed */
 /*
- * Undefined -> status in transition. We are in process of changing bits;
- * we SET Error bit before RESET of Busy bit
+ * Serial EEPROM Control, PCI Command Codes, User I/O Control,
+ * Init Control Register
  */
-#define MBX_STS_RESERVED   0x000000c0
-
-#define MBX_RESERVED_5     0x00000020  /* FYI: reserved/unused bit */
-#define MBX_RESERVED_4     0x00000010  /* FYI: reserved/unused bit */
-
-/******************************************/
-/***    MAILBOX #1  -  PUTS COMMANDS    ***/
-/******************************************/
-
+#define PLX_REG_CNTRL          0x006c
+
+/* PCI Read Command Code For DMA */
+#define PLX_CNTRL_CCRDMA(x)    (BIT(0) * ((x) & 0xf))
+#define PLX_CNTRL_CCRDMA_MASK  GENMASK(3, 0)
+#define PLX_CNTRL_CCRDMA_SHIFT 0
+#define PLX_CNTRL_CCRDMA_NORMAL        PLX_CNTRL_CCRDMA(14)    /* value after reset */
+/* PCI Write Command Code For DMA 0 */
+#define PLX_CNTRL_CCWDMA(x)    (BIT(4) * ((x) & 0xf))
+#define PLX_CNTRL_CCWDMA_MASK  GENMASK(7, 4)
+#define PLX_CNTRL_CCWDMA_SHIFT 4
+#define PLX_CNTRL_CCWDMA_NORMAL        PLX_CNTRL_CCWDMA(7)     /* value after reset */
+/* PCI Memory Read Command Code For Direct Master */
+#define PLX_CNTRL_CCRDM(x)     (BIT(8) * ((x) & 0xf))
+#define PLX_CNTRL_CCRDM_MASK   GENMASK(11, 8)
+#define PLX_CNTRL_CCRDM_SHIFT  8
+#define PLX_CNTRL_CCRDM_NORMAL PLX_CNTRL_CCRDM(6)      /* value after reset */
+/* PCI Memory Write Command Code For Direct Master */
+#define PLX_CNTRL_CCWDM(x)     (BIT(12) * ((x) & 0xf))
+#define PLX_CNTRL_CCWDM_MASK   GENMASK(15, 12)
+#define PLX_CNTRL_CCWDM_SHIFT  12
+#define PLX_CNTRL_CCWDM_NORMAL PLX_CNTRL_CCWDM(7)      /* value after reset */
+/* General Purpose Output (USERO) */
+#define PLX_CNTRL_USERO                BIT(16)
+/* General Purpose Input (USERI) (read-only) */
+#define PLX_CNTRL_USERI                BIT(17)
+/* Serial EEPROM Clock Output (EESK) */
+#define PLX_CNTRL_EESK         BIT(24)
+/* Serial EEPROM Chip Select Output (EECS) */
+#define PLX_CNTRL_EECS         BIT(25)
+/* Serial EEPROM Data Write Bit (EEDI (sic)) */
+#define PLX_CNTRL_EEWB         BIT(26)
+/* Serial EEPROM Data Read Bit (EEDO (sic)) (read-only) */
+#define PLX_CNTRL_EERB         BIT(27)
+/* Serial EEPROM Present (read-only) */
+#define PLX_CNTRL_EEPRESENT    BIT(28)
+/* Reload Configuration Registers from EEPROM */
+#define PLX_CNTRL_EERELOAD     BIT(29)
+/* PCI Adapter Software Reset (asserts LRESETo#) */
+#define PLX_CNTRL_RESET                BIT(30)
+/* Local Init Status (read-only) */
+#define PLX_CNTRL_INITDONE     BIT(31)
 /*
- * Any attempt to execute an unimplement command results in the PUTS
- * interface executing a NOOP and continuing as if the offending command
- * completed normally.  Note: this supplies a simple method to interrogate
- * mailbox command processing functionality.
+ * Combined command code stuff for convenience.
  */
+#define PLX_CNTRL_CC_MASK      \
+       (PLX_CNTRL_CCRDMA_MASK | PLX_CNTRL_CCWDMA_MASK | \
+        PLX_CNTRL_CCRDM_MASK | PLX_CNTRL_CCWDM_MASK)
+#define PLX_CNTRL_CC_NORMAL    \
+       (PLX_CNTRL_CCRDMA_NORMAL | PLX_CNTRL_CCWDMA_NORMAL | \
+        PLX_CNTRL_CCRDM_NORMAL | PLX_CNTRL_CCWDM_NORMAL) /* val after reset */
+
+/* PCI Permanent Configuration ID Register (hard-coded PLX vendor and device) */
+#define PLX_REG_PCIHIDR                0x0070
+
+/* Hard-coded ID for PLX PCI 9080 */
+#define PLX_PCIHIDR_9080       0x908010b5
+
+/* PCI Permanent Revision ID Register (hard-coded silicon revision) (8-bit). */
+#define PLX_REG_PCIHREV                0x0074
+
+/* DMA Channel N Mode Register (N <= 1) */
+#define PLX_REG_DMAMODE(n)     ((n) ? PLX_REG_DMAMODE1 : PLX_REG_DMAMODE0)
+#define PLX_REG_DMAMODE0       0x0080
+#define PLX_REG_DMAMODE1       0x0094
+
+/* Local Bus Width */
+#define PLX_DMAMODE_WIDTH8     (BIT(0) * 0)    /* 8 bits wide */
+#define PLX_DMAMODE_WIDTH16    (BIT(0) * 1)    /* 16 bits wide */
+#define PLX_DMAMODE_WIDTH32    (BIT(0) * 2)    /* 32 bits wide */
+#define PLX_DMAMODE_WIDTH32A   (BIT(0) * 3)    /* 32 bits wide */
+#define PLX_DMAMODE_WIDTH_MASK GENMASK(1, 0)
+#define PLX_DMAMODE_WIDTH_SHIFT        0
+/* Internal Wait States */
+#define PLX_DMAMODE_IWS(x)     (BIT(2) * ((x) & 0xf))
+#define PLX_DMAMODE_IWS_MASK   GENMASK(5, 2)
+#define PLX_DMAMODE_SHIFT      2
+/* Ready Input Enable */
+#define PLX_DMAMODE_READYIEN   BIT(6)
+/* BTERM# Input Enable */
+#define PLX_DMAMODE_BTERMIEN   BIT(7)
+/* Local Burst Enable */
+#define PLX_DMAMODE_BURSTEN    BIT(8)
+/* Chaining Enable */
+#define PLX_DMAMODE_CHAINEN    BIT(9)
+/* Done Interrupt Enable */
+#define PLX_DMAMODE_DONEIEN    BIT(10)
+/* Hold Local Address Constant */
+#define PLX_DMAMODE_LACONST    BIT(11)
+/* Demand Mode */
+#define PLX_DMAMODE_DEMAND     BIT(12)
+/* Write And Invalidate Mode */
+#define PLX_DMAMODE_WINVALIDATE        BIT(13)
+/* DMA EOT Enable - enables EOT0# or EOT1# input pin */
+#define PLX_DMAMODE_EOTEN      BIT(14)
+/* DMA Stop Data Transfer Mode - 0:BLAST; 1:EOT asserted or DREQ deasserted */
+#define PLX_DMAMODE_STOP       BIT(15)
+/* DMA Clear Count Mode - count in descriptor cleared on completion */
+#define PLX_DMAMODE_CLRCOUNT   BIT(16)
+/* DMA Channel Interrupt Select - 0:local bus interrupt; 1:PCI interrupt */
+#define PLX_DMAMODE_INTRPCI    BIT(17)
+
+/* DMA Channel N PCI Address Register (N <= 1) */
+#define PLX_REG_DMAPADR(n)     ((n) ? PLX_REG_DMAPADR1 : PLX_REG_DMAPADR0)
+#define PLX_REG_DMAPADR0       0x0084
+#define PLX_REG_DMAPADR1       0x0098
+
+/* DMA Channel N Local Address Register (N <= 1) */
+#define PLX_REG_DMALADR(n)     ((n) ? PLX_REG_DMALADR1 : PLX_REG_DMALADR0)
+#define PLX_REG_DMALADR0       0x0088
+#define PLX_REG_DMALADR1       0x009c
+
+/* DMA Channel N Transfer Size (Bytes) Register (N <= 1) (first 23 bits) */
+#define PLX_REG_DMASIZ(n)      ((n) ? PLX_REG_DMASIZ1 : PLX_REG_DMASIZ0)
+#define PLX_REG_DMASIZ0                0x008c
+#define PLX_REG_DMASIZ1                0x00a0
+
+/* DMA Channel N Descriptor Pointer Register (N <= 1) */
+#define PLX_REG_DMADPR(n)      ((n) ? PLX_REG_DMADPR1 : PLX_REG_DMADPR0)
+#define PLX_REG_DMADPR0                0x0090
+#define PLX_REG_DMADPR1                0x00a4
+
+/* Descriptor Located In PCI Address Space (not local address space) */
+#define PLX_DMADPR_DESCPCI     BIT(0)
+/* End Of Chain */
+#define PLX_DMADPR_CHAINEND    BIT(1)
+/* Interrupt After Terminal Count */
+#define PLX_DMADPR_TCINTR      BIT(2)
+/* Direction Of Transfer Local Bus To PCI (not PCI to local) */
+#define PLX_DMADPR_XFERL2P     BIT(3)
+/* Next Descriptor Address Bits 31:4 (16 byte boundary) */
+#define PLX_DMADPR_NEXT_MASK   GENMASK(31, 4)
+
+/* DMA Channel N Command/Status Register (N <= 1) (8-bit) */
+#define PLX_REG_DMACSR(n)      ((n) ? PLX_REG_DMACSR1 : PLX_REG_DMACSR0)
+#define PLX_REG_DMACSR0                0x00a8
+#define PLX_REG_DMACSR1                0x00a9
+
+/* Channel Enable */
+#define PLX_DMACSR_ENABLE      BIT(0)
+/* Channel Start - write 1 to start transfer (write-only) */
+#define PLX_DMACSR_START       BIT(1)
+/* Channel Abort - write 1 to abort transfer (write-only) */
+#define PLX_DMACSR_ABORT       BIT(2)
+/* Clear Interrupt - write 1 to clear DMA Channel Interrupt (write-only) */
+#define PLX_DMACSR_CLEARINTR   BIT(3)
+/* Channel Done - transfer complete/inactive (read-only) */
+#define PLX_DMACSR_DONE                BIT(4)
+
+/* DMA Threshold Register */
+#define PLX_REG_DMATHR         0x00b0
 
-#define MBX_CMD_MASK       0xffff0000  /* PUTS Command Register bits */
-
-#define MBX_CMD_ABORTJ     0x85000000  /* abort and jump */
-#define MBX_CMD_RESETP     0x86000000  /* reset and pause at start */
-#define MBX_CMD_PAUSE      0x87000000  /* pause immediately */
-#define MBX_CMD_PAUSEC     0x88000000  /* pause on completion */
-#define MBX_CMD_RESUME     0x89000000  /* resume operation */
-#define MBX_CMD_STEP       0x8a000000  /* single step tests */
-
-#define MBX_CMD_BSWAP      0x8c000000  /* identify byte swap scheme */
-#define MBX_CMD_BSWAP_0    0x8c000000  /* use scheme 0 */
-#define MBX_CMD_BSWAP_1    0x8c000001  /* use scheme 1 */
-
-/* setup host memory access window size */
-#define MBX_CMD_SETHMS     0x8d000000
-/* setup host memory access base address */
-#define MBX_CMD_SETHBA     0x8e000000
-/* perform memory setup and continue (IE. Done) */
-#define MBX_CMD_MGO        0x8f000000
-#define MBX_CMD_NOOP       0xFF000000  /* dummy, illegal command */
-
-/*****************************************/
-/***    MAILBOX #2  -  MEMORY SIZE     ***/
-/*****************************************/
-
-#define MBX_MEMSZ_MASK     0xffff0000  /* PUTS Memory Size Register bits */
-
-#define MBX_MEMSZ_128KB    0x00020000  /* 128 kilobyte board */
-#define MBX_MEMSZ_256KB    0x00040000  /* 256 kilobyte board */
-#define MBX_MEMSZ_512KB    0x00080000  /* 512 kilobyte board */
-#define MBX_MEMSZ_1MB      0x00100000  /* 1 megabyte board */
-#define MBX_MEMSZ_2MB      0x00200000  /* 2 megabyte board */
-#define MBX_MEMSZ_4MB      0x00400000  /* 4 megabyte board */
-#define MBX_MEMSZ_8MB      0x00800000  /* 8 megabyte board */
-#define MBX_MEMSZ_16MB     0x01000000  /* 16 megabyte board */
-
-/***************************************/
-/***    MAILBOX #2  -  BOARD TYPE    ***/
-/***************************************/
-
-#define MBX_BTYPE_MASK          0x0000ffff     /* PUTS Board Type Register */
-/* PUTS Board Family Register */
-#define MBX_BTYPE_FAMILY_MASK   0x0000ff00
-#define MBX_BTYPE_SUBTYPE_MASK  0x000000ff     /* PUTS Board Subtype */
-
-#define MBX_BTYPE_PLX9060       0x00000100     /* PLX family type */
-#define MBX_BTYPE_PLX9080       0x00000300     /* PLX wanXL100s family type */
-
-#define MBX_BTYPE_WANXL_4       0x00000104     /* wanXL400, 4-port */
-#define MBX_BTYPE_WANXL_2       0x00000102     /* wanXL200, 2-port */
-#define MBX_BTYPE_WANXL_1s      0x00000301     /* wanXL100s, 1-port */
-#define MBX_BTYPE_WANXL_1t      0x00000401     /* wanXL100T1, 1-port */
+/*
+ * DMA Threshold constraints:
+ * (C0PLAF + 1) + (C0PLAE + 1) <= 32
+ * (C0LPAF + 1) + (C0LPAE + 1) <= 32
+ * (C1PLAF + 1) + (C1PLAE + 1) <= 16
+ * (C1LPAF + 1) + (C1LPAE + 1) <= 16
+ */
 
-/*****************************************/
-/***    MAILBOX #3  -  SHMQ MAILBOX    ***/
-/*****************************************/
+/* DMA Channel 0 PCI-to-Local Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C0PLAF(x)   (BIT(0) * ((x) & 0xf))
+#define PLX_DMATHR_C0PLAF_MASK GENMASK(3, 0)
+#define PLX_DMATHR_C0PLAF_SHIFT        0
+/* DMA Channel 0 Local-to-PCI Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C0LPAE(x)   (BIT(4) * ((x) & 0xf))
+#define PLX_DMATHR_C0LPAE_MASK GENMASK(7, 4)
+#define PLX_DMATHR_C0LPAE_SHIFT        4
+/* DMA Channel 0 Local-to-PCI Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C0LPAF(x)   (BIT(8) * ((x) & 0xf))
+#define PLX_DMATHR_C0LPAF_MASK GENMASK(11, 8)
+#define PLX_DMATHR_C0LPAF_SHIFT        8
+/* DMA Channel 0 PCI-to-Local Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C0PLAE(x)   (BIT(12) * ((x) & 0xf))
+#define PLX_DMATHR_C0PLAE_MASK GENMASK(15, 12)
+#define PLX_DMATHR_C0PLAE_SHIFT        12
+/* DMA Channel 1 PCI-to-Local Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C1PLAF(x)   (BIT(16) * ((x) & 0xf))
+#define PLX_DMATHR_C1PLAF_MASK GENMASK(19, 16)
+#define PLX_DMATHR_C1PLAF_SHIFT        16
+/* DMA Channel 1 Local-to-PCI Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C1LPAE(x)   (BIT(20) * ((x) & 0xf))
+#define PLX_DMATHR_C1LPAE_MASK GENMASK(23, 20)
+#define PLX_DMATHR_C1LPAE_SHIFT        20
+/* DMA Channel 1 Local-to-PCI Almost Full (divided by 2, minus 1) */
+#define PLX_DMATHR_C1LPAF(x)   (BIT(24) * ((x) & 0xf))
+#define PLX_DMATHR_C1LPAF_MASK GENMASK(27, 24)
+#define PLX_DMATHR_C1LPAF_SHIFT        24
+/* DMA Channel 1 PCI-to-Local Almost Empty (divided by 2, minus 1) */
+#define PLX_DMATHR_C1PLAE(x)   (BIT(28) * ((x) & 0xf))
+#define PLX_DMATHR_C1PLAE_MASK GENMASK(31, 28)
+#define PLX_DMATHR_C1PLAE_SHIFT        28
 
-#define MBX_SMBX_MASK           0x000000ff     /* PUTS SHMQ Mailbox bits */
+/*
+ * Messaging Queue Registers OPLFIS, OPLFIM, IQP, OQP, MQCR, QBAR, IFHPR,
+ * IFTPR, IPHPR, IPTPR, OFHPR, OFTPR, OPHPR, OPTPR, and QSR have been omitted.
+ * They are used by the I2O feature.  (IQP and OQP occupy the usual offsets of
+ * the MBOX0 and MBOX1 registers if the I2O feature is enabled, but MBOX0 and
+ * MBOX1 are accessible via alternative offsets.
+ */
 
-/***************************************/
-/***    GENERIC HOST-SIDE DRIVER     ***/
-/***************************************/
+/* Queue Status/Control Register */
+#define PLX_REG_QSR            0x00e8
 
-#define MBX_ERR    0
-#define MBX_OK     1
+/* Value of QSR after reset - disables I2O feature completely. */
+#define PLX_QSR_VALUE_AFTER_RESET      0x00000050
 
-/* mailbox check routine - type of testing */
-#define MBXCHK_STS      0x00   /* check for PUTS status */
-#define MBXCHK_NOWAIT   0x01   /* dont care about PUTS status */
+/*
+ * Accesses near the end of memory can cause the PLX chip
+ * to pre-fetch data off of end-of-ram.  Limit the size of
+ * memory so host-side accesses cannot occur.
+ */
 
-/* system allocates this many bytes for address mapping mailbox space */
-#define MBX_ADDR_SPACE_360 0x80        /* wanXL100s/200/400 */
-#define MBX_ADDR_MASK_360 (MBX_ADDR_SPACE_360 - 1)
+#define PLX_PREFETCH   32
 
+/**
+ * plx9080_abort_dma - Abort a PLX PCI 9080 DMA transfer
+ * @iobase:    Remapped base address of configuration registers.
+ * @channel:   DMA channel number (0 or 1).
+ *
+ * Aborts the DMA transfer on the channel, which must have been enabled
+ * and started beforehand.
+ *
+ * Return:
+ *     %0 on success.
+ *     -%ETIMEDOUT if timed out waiting for abort to complete.
+ */
 static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
 {
        void __iomem *dma_cs_addr;
@@ -421,29 +631,26 @@ static inline int plx9080_abort_dma(void __iomem *iobase, unsigned int channel)
        const int timeout = 10000;
        unsigned int i;
 
-       if (channel)
-               dma_cs_addr = iobase + PLX_DMA1_CS_REG;
-       else
-               dma_cs_addr = iobase + PLX_DMA0_CS_REG;
+       dma_cs_addr = iobase + PLX_REG_DMACSR(channel);
 
-       /*  abort dma transfer if necessary */
+       /* abort dma transfer if necessary */
        dma_status = readb(dma_cs_addr);
-       if ((dma_status & PLX_DMA_EN_BIT) == 0)
+       if ((dma_status & PLX_DMACSR_ENABLE) == 0)
                return 0;
 
-       /*  wait to make sure done bit is zero */
-       for (i = 0; (dma_status & PLX_DMA_DONE_BIT) && i < timeout; i++) {
+       /* wait to make sure done bit is zero */
+       for (i = 0; (dma_status & PLX_DMACSR_DONE) && i < timeout; i++) {
                udelay(1);
                dma_status = readb(dma_cs_addr);
        }
        if (i == timeout)
                return -ETIMEDOUT;
 
-       /*  disable and abort channel */
-       writeb(PLX_DMA_ABORT_BIT, dma_cs_addr);
-       /*  wait for dma done bit */
+       /* disable and abort channel */
+       writeb(PLX_DMACSR_ABORT, dma_cs_addr);
+       /* wait for dma done bit */
        dma_status = readb(dma_cs_addr);
-       for (i = 0; (dma_status & PLX_DMA_DONE_BIT) == 0 && i < timeout; i++) {
+       for (i = 0; (dma_status & PLX_DMACSR_DONE) == 0 && i < timeout; i++) {
                udelay(1);
                dma_status = readb(dma_cs_addr);
        }
index e9e43139157d93b33d2db89f90adc208972bf177..802f51e464050c36db7ae672e116273a5083f5ff 100644 (file)
@@ -643,7 +643,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
        outb(0, dev->iobase + DAQP_AUX_REG);
 
        for (i = 0; i > insn->n; i++) {
-               unsigned val = data[i];
+               unsigned int val = data[i];
                int ret;
 
                /* D/A transfer rate is about 8ms */
index 9b6c567732479a2b95c177250b6c1ba3b42bbdf2..e00e9c6268aef7d3d2999cc91259c3a849a1814f 100644 (file)
@@ -362,7 +362,7 @@ struct rtd_private {
        long ai_count;          /* total transfer size (samples) */
        int xfer_count;         /* # to transfer data. 0->1/2FIFO */
        int flags;              /* flag event modes */
-       unsigned fifosz;
+       unsigned int fifosz;
 
        /* 8254 Timer/Counter gate and clock sources */
        unsigned char timer_gate_src[3];
@@ -491,9 +491,9 @@ static void rtd_load_channelgain_list(struct comedi_device *dev,
 static int rtd520_probe_fifo_depth(struct comedi_device *dev)
 {
        unsigned int chanspec = CR_PACK(0, 0, AREF_GROUND);
-       unsigned i;
-       static const unsigned limit = 0x2000;
-       unsigned fifo_size = 0;
+       unsigned int i;
+       static const unsigned int limit = 0x2000;
+       unsigned int fifo_size = 0;
 
        writel(0, dev->mmio + LAS0_ADC_FIFO_CLEAR);
        rtd_load_channelgain_list(dev, 1, &chanspec);
@@ -501,7 +501,7 @@ static int rtd520_probe_fifo_depth(struct comedi_device *dev)
        writel(0, dev->mmio + LAS0_ADC_CONVERSION);
        /* convert  samples */
        for (i = 0; i < limit; ++i) {
-               unsigned fifo_status;
+               unsigned int fifo_status;
                /* trigger conversion */
                writew(0, dev->mmio + LAS0_ADC);
                usleep_range(1, 1000);
@@ -1175,7 +1175,7 @@ static void rtd_reset(struct comedi_device *dev)
 
        writel(0, dev->mmio + LAS0_BOARD_RESET);
        usleep_range(100, 1000);        /* needed? */
-       writel(0, devpriv->lcfg + PLX_INTRCS_REG);
+       writel(0, devpriv->lcfg + PLX_REG_INTCSR);
        writew(0, dev->mmio + LAS0_IT);
        writew(~0, dev->mmio + LAS0_CLEAR);
        readw(dev->mmio + LAS0_CLEAR);
@@ -1316,7 +1316,8 @@ static int rtd_auto_attach(struct comedi_device *dev,
        devpriv->fifosz = ret;
 
        if (dev->irq)
-               writel(ICS_PIE | ICS_PLIE, devpriv->lcfg + PLX_INTRCS_REG);
+               writel(PLX_INTCSR_PIEN | PLX_INTCSR_PLIEN,
+                      devpriv->lcfg + PLX_REG_INTCSR);
 
        return 0;
 }
index c5e08635e01e3b9d10f9d643a738d81e0e8b5989..4a87b4b52400a23e9788b39977fd21f2629d13c4 100644 (file)
@@ -708,7 +708,7 @@ static uint16_t s626_get_mode_a(struct comedi_device *dev,
        uint16_t cra;
        uint16_t crb;
        uint16_t setup;
-       unsigned cntsrc, clkmult, clkpol, encmode;
+       unsigned int cntsrc, clkmult, clkpol, encmode;
 
        /* Fetch CRA and CRB register images. */
        cra = s626_debi_read(dev, S626_LP_CRA(chan));
@@ -763,7 +763,7 @@ static uint16_t s626_get_mode_b(struct comedi_device *dev,
        uint16_t cra;
        uint16_t crb;
        uint16_t setup;
-       unsigned cntsrc, clkmult, clkpol, encmode;
+       unsigned int cntsrc, clkmult, clkpol, encmode;
 
        /* Fetch CRA and CRB register images. */
        cra = s626_debi_read(dev, S626_LP_CRA(chan));
@@ -838,7 +838,7 @@ static void s626_set_mode_a(struct comedi_device *dev,
        struct s626_private *devpriv = dev->private;
        uint16_t cra;
        uint16_t crb;
-       unsigned cntsrc, clkmult, clkpol;
+       unsigned int cntsrc, clkmult, clkpol;
 
        /* Initialize CRA and CRB images. */
        /* Preload trigger is passed through. */
@@ -916,7 +916,7 @@ static void s626_set_mode_b(struct comedi_device *dev,
        struct s626_private *devpriv = dev->private;
        uint16_t cra;
        uint16_t crb;
-       unsigned cntsrc, clkmult, clkpol;
+       unsigned int cntsrc, clkmult, clkpol;
 
        /* Initialize CRA and CRB images. */
        /* IndexSrc is passed through. */
index b83424e7507badb6336ccd89debf0d855b231340..6a00a64c6f3ac8045a24f9d1b52fb5f7acc847d6 100644 (file)
 #define S626_ENCODER_CHANNELS   6
 #define S626_DIO_CHANNELS       48
 #define S626_DIO_BANKS         3       /* Number of DIO groups. */
-#define S626_DIO_EXTCHANS      40      /* Number of extended-capability
-                                        * DIO channels. */
+#define S626_DIO_EXTCHANS      40      /*
+                                        * Number of extended-capability
+                                        * DIO channels.
+                                        */
 
 #define S626_NUM_TRIMDACS      12      /* Number of valid TrimDAC channels. */
 
 #define S626_GSEL_BIPOLAR10V   0x00A0  /* S626_LP_GSEL setting 10V bipolar. */
 
 /* Error codes that must be visible to this base class. */
-#define S626_ERR_ILLEGAL_PARM  0x00010000      /* Illegal function parameter
-                                                * value was specified. */
+#define S626_ERR_ILLEGAL_PARM  0x00010000      /*
+                                                * Illegal function parameter
+                                                * value was specified.
+                                                */
 #define S626_ERR_I2C           0x00020000      /* I2C error. */
-#define S626_ERR_COUNTERSETUP  0x00200000      /* Illegal setup specified for
-                                                * counter channel. */
+#define S626_ERR_COUNTERSETUP  0x00200000      /*
+                                                * Illegal setup specified for
+                                                * counter channel.
+                                                */
 #define S626_ERR_DEBI_TIMEOUT  0x00400000      /* DEBI transfer timed out. */
 
 /*
  * Organization (physical order) and size (in DWORDs) of logical DMA buffers
  * contained by ANA_DMABUF.
  */
-#define S626_ADC_DMABUF_DWORDS 40      /* ADC DMA buffer must hold 16 samples,
-                                        * plus pre/post garbage samples. */
-#define S626_DAC_WDMABUF_DWORDS        1       /* DAC output DMA buffer holds a single
-                                        * sample. */
+#define S626_ADC_DMABUF_DWORDS 40      /*
+                                        * ADC DMA buffer must hold 16 samples,
+                                        * plus pre/post garbage samples.
+                                        */
+#define S626_DAC_WDMABUF_DWORDS        1       /*
+                                        * DAC output DMA buffer holds a single
+                                        * sample.
+                                        */
 
 /* All remaining space in 4KB DMA buffer is available for the RPS1 program. */
 
 #define S626_RPS_IRQ           0x60000000      /* IRQ */
 
 #define S626_RPS_LOGICAL_OR    0x08000000      /* Logical OR conditionals. */
-#define S626_RPS_INVERT                0x04000000      /* Test for negated
-                                                * semaphores. */
+#define S626_RPS_INVERT                0x04000000      /*
+                                                * Test for negated
+                                                * semaphores.
+                                                */
 #define S626_RPS_DEBI          0x00000002      /* DEBI done */
 
-#define S626_RPS_SIG0          0x00200000      /* RPS semaphore 0
-                                                * (used by ADC). */
-#define S626_RPS_SIG1          0x00400000      /* RPS semaphore 1
-                                                * (used by DAC). */
-#define S626_RPS_SIG2          0x00800000      /* RPS semaphore 2
-                                                * (not used). */
+#define S626_RPS_SIG0          0x00200000      /*
+                                                * RPS semaphore 0
+                                                * (used by ADC).
+                                                */
+#define S626_RPS_SIG1          0x00400000      /*
+                                                * RPS semaphore 1
+                                                * (used by DAC).
+                                                */
+#define S626_RPS_SIG2          0x00800000      /*
+                                                * RPS semaphore 2
+                                                * (not used).
+                                                */
 #define S626_RPS_GPIO2         0x00080000      /* RPS GPIO2 */
 #define S626_RPS_GPIO3         0x00100000      /* RPS GPIO3 */
 
-#define S626_RPS_SIGADC                S626_RPS_SIG0   /* Trigger/status for
-                                                * ADC's RPS program. */
-#define S626_RPS_SIGDAC                S626_RPS_SIG1   /* Trigger/status for
-                                                * DAC's RPS program. */
+#define S626_RPS_SIGADC                S626_RPS_SIG0   /*
+                                                * Trigger/status for
+                                                * ADC's RPS program.
+                                                */
+#define S626_RPS_SIGDAC                S626_RPS_SIG1   /*
+                                                * Trigger/status for
+                                                * DAC's RPS program.
+                                                */
 
 /* RPS clock parameters. */
-#define S626_RPSCLK_SCALAR     8       /* This is apparent ratio of
-                                        * PCI/RPS clks (undocumented!!). */
+#define S626_RPSCLK_SCALAR     8       /*
+                                        * This is apparent ratio of
+                                        * PCI/RPS clks (undocumented!!).
+                                        */
 #define S626_RPSCLK_PER_US     (33 / S626_RPSCLK_SCALAR)
-                                       /* Number of RPS clocks in one
-                                        * microsecond. */
+                                       /*
+                                        * Number of RPS clocks in one
+                                        * microsecond.
+                                        */
 
 /* Event counter source addresses. */
 #define S626_SBA_RPS_A0                0x27    /* Time of RPS0 busy, in PCI clocks. */
 
 /* GPIO constants. */
-#define S626_GPIO_BASE         0x10004000      /* GPIO 0,2,3 = inputs,
-                                                * GPIO3 = IRQ; GPIO1 = out. */
+#define S626_GPIO_BASE         0x10004000      /*
+                                                * GPIO 0,2,3 = inputs,
+                                                * GPIO3 = IRQ; GPIO1 = out.
+                                                */
 #define S626_GPIO1_LO          0x00000000      /* GPIO1 set to LOW. */
 #define S626_GPIO1_HI          0x00001000      /* GPIO1 set to HIGH. */
 
 /* Primary Status Register (PSR) constants. */
 #define S626_PSR_DEBI_E                0x00040000      /* DEBI event flag. */
 #define S626_PSR_DEBI_S                0x00080000      /* DEBI status flag. */
-#define S626_PSR_A2_IN         0x00008000      /* Audio output DMA2 protection
-                                                * address reached. */
-#define S626_PSR_AFOU          0x00000800      /* Audio FIFO under/overflow
-                                                * detected. */
-#define S626_PSR_GPIO2         0x00000020      /* GPIO2 input pin: 0=AdcBusy,
-                                                * 1=AdcIdle. */
-#define S626_PSR_EC0S          0x00000001      /* Event counter 0 threshold
-                                                * reached. */
+#define S626_PSR_A2_IN         0x00008000      /*
+                                                * Audio output DMA2 protection
+                                                * address reached.
+                                                */
+#define S626_PSR_AFOU          0x00000800      /*
+                                                * Audio FIFO under/overflow
+                                                * detected.
+                                                */
+#define S626_PSR_GPIO2         0x00000020      /*
+                                                * GPIO2 input pin: 0=AdcBusy,
+                                                * 1=AdcIdle.
+                                                */
+#define S626_PSR_EC0S          0x00000001      /*
+                                                * Event counter 0 threshold
+                                                * reached.
+                                                */
 
 /* Secondary Status Register (SSR) constants. */
-#define S626_SSR_AF2_OUT       0x00000200      /* Audio 2 output FIFO
-                                                * under/overflow detected. */
+#define S626_SSR_AF2_OUT       0x00000200      /*
+                                                * Audio 2 output FIFO
+                                                * under/overflow detected.
+                                                */
 
 /* Master Control Register 1 (MC1) constants. */
 #define S626_MC1_SOFT_RESET    0x80000000      /* Invoke 7146 soft reset. */
-#define S626_MC1_SHUTDOWN      0x3FFF0000      /* Shut down all MC1-controlled
-                                                * enables. */
+#define S626_MC1_SHUTDOWN      0x3FFF0000      /*
+                                                * Shut down all MC1-controlled
+                                                * enables.
+                                                */
 
 #define S626_MC1_ERPS1         0x2000  /* Enab/disable RPS task 1. */
 #define S626_MC1_ERPS0         0x1000  /* Enab/disable RPS task 0. */
 #define S626_P_DEBIAD          0x0088  /* DEBI target address. */
 #define S626_P_I2CCTRL         0x008C  /* I2C control. */
 #define S626_P_I2CSTAT         0x0090  /* I2C status. */
-#define S626_P_BASEA2_IN       0x00AC  /* Audio input 2 base physical DMAbuf
-                                        * address. */
-#define S626_P_PROTA2_IN       0x00B0  /* Audio input 2 physical DMAbuf
-                                        * protection address. */
+#define S626_P_BASEA2_IN       0x00AC  /*
+                                        * Audio input 2 base physical DMAbuf
+                                        * address.
+                                        */
+#define S626_P_PROTA2_IN       0x00B0  /*
+                                        * Audio input 2 physical DMAbuf
+                                        * protection address.
+                                        */
 #define S626_P_PAGEA2_IN       0x00B4  /* Audio input 2 paging attributes. */
-#define S626_P_BASEA2_OUT      0x00B8  /* Audio output 2 base physical DMAbuf
-                                        * address. */
-#define S626_P_PROTA2_OUT      0x00BC  /* Audio output 2 physical DMAbuf
-                                        * protection address. */
+#define S626_P_BASEA2_OUT      0x00B8  /*
+                                        * Audio output 2 base physical DMAbuf
+                                        * address.
+                                        */
+#define S626_P_PROTA2_OUT      0x00BC  /*
+                                        * Audio output 2 physical DMAbuf
+                                        * protection address.
+                                        */
 #define S626_P_PAGEA2_OUT      0x00C0  /* Audio output 2 paging attributes. */
 #define S626_P_RPSPAGE0                0x00C4  /* RPS0 page. */
 #define S626_P_RPSPAGE1                0x00C8  /* RPS1 page. */
 #define S626_P_PSR             0x0110  /* Primary status. */
 #define S626_P_SSR             0x0114  /* Secondary status. */
 #define S626_P_EC1R            0x0118  /* Event counter set 1. */
-#define S626_P_ADP4            0x0138  /* Logical audio DMA pointer of audio
-                                        * input FIFO A2_IN. */
+#define S626_P_ADP4            0x0138  /*
+                                        * Logical audio DMA pointer of audio
+                                        * input FIFO A2_IN.
+                                        */
 #define S626_P_FB_BUFFER1      0x0144  /* Audio feedback buffer 1. */
 #define S626_P_FB_BUFFER2      0x0148  /* Audio feedback buffer 2. */
 #define S626_P_TSL1            0x0180  /* Audio time slot list 1. */
 #define S626_LP_RDMISC2                0x0082  /* Read Misc2. */
 
 /* Bit masks for MISC1 register that are the same for reads and writes. */
-#define S626_MISC1_WENABLE     0x8000  /* enab writes to MISC2 (except Clear
-                                        * Watchdog bit). */
+#define S626_MISC1_WENABLE     0x8000  /*
+                                        * enab writes to MISC2 (except Clear
+                                        * Watchdog bit).
+                                        */
 #define S626_MISC1_WDISABLE    0x0000  /* Disable writes to MISC2. */
-#define S626_MISC1_EDCAP       0x1000  /* Enable edge capture on DIO chans
-                                        * specified by S626_LP_WRCAPSELx. */
-#define S626_MISC1_NOEDCAP     0x0000  /* Disable edge capture on specified
-                                        * DIO chans. */
+#define S626_MISC1_EDCAP       0x1000  /*
+                                        * Enable edge capture on DIO chans
+                                        * specified by S626_LP_WRCAPSELx.
+                                        */
+#define S626_MISC1_NOEDCAP     0x0000  /*
+                                        * Disable edge capture on specified
+                                        * DIO chans.
+                                        */
 
 /* Bit masks for MISC1 register reads. */
 #define S626_RDMISC1_WDTIMEOUT 0x4000  /* Watchdog timer timed out. */
 #define S626_A1_RUN            0x20000000      /* Run A1 based on TSL1. */
 #define S626_A1_SWAP           0x00200000      /* Use big-endian for A1. */
 #define S626_A2_SWAP           0x00100000      /* Use big-endian for A2. */
-#define S626_WS_MODES          0x00019999      /* WS0 = TSL1 trigger input,
-                                                * WS1-WS4 = CS* outputs. */
-
-#if S626_PLATFORM == S626_INTEL                /* Base ACON1 config: always run
-                                        * A1 based on TSL1. */
+#define S626_WS_MODES          0x00019999      /*
+                                                * WS0 = TSL1 trigger input,
+                                                * WS1-WS4 = CS* outputs.
+                                                */
+
+#if S626_PLATFORM == S626_INTEL                /*
+                                        * Base ACON1 config: always run
+                                        * A1 based on TSL1.
+                                        */
 #define S626_ACON1_BASE                (S626_WS_MODES | S626_A1_RUN)
 #elif S626_PLATFORM == S626_MOTOROLA
 #define S626_ACON1_BASE                \
        (S626_WS_MODES | S626_A1_RUN | S626_A1_SWAP | S626_A2_SWAP)
 #endif
 
-#define S626_ACON1_ADCSTART    S626_ACON1_BASE /* Start ADC: run A1
-                                                * based on TSL1. */
+#define S626_ACON1_ADCSTART    S626_ACON1_BASE /*
+                                                * Start ADC: run A1
+                                                * based on TSL1.
+                                                */
 #define S626_ACON1_DACSTART    (S626_ACON1_BASE | S626_A2_RUN)
 /* Start transmit to DAC: run A2 based on TSL2. */
 #define S626_ACON1_DACSTOP     S626_ACON1_BASE /* Halt A2. */
 
 /* Bit masks for ACON2 register. */
 #define S626_A1_CLKSRC_BCLK1   0x00000000      /* A1 bit rate = BCLK1 (ADC). */
-#define S626_A2_CLKSRC_X1      0x00800000      /* A2 bit rate = ACLK/1
-                                                * (DACs). */
-#define S626_A2_CLKSRC_X2      0x00C00000      /* A2 bit rate = ACLK/2
-                                                * (DACs). */
-#define S626_A2_CLKSRC_X4      0x01400000      /* A2 bit rate = ACLK/4
-                                                * (DACs). */
+#define S626_A2_CLKSRC_X1      0x00800000      /*
+                                                * A2 bit rate = ACLK/1
+                                                * (DACs).
+                                                */
+#define S626_A2_CLKSRC_X2      0x00C00000      /*
+                                                * A2 bit rate = ACLK/2
+                                                * (DACs).
+                                                */
+#define S626_A2_CLKSRC_X4      0x01400000      /*
+                                                * A2 bit rate = ACLK/4
+                                                * (DACs).
+                                                */
 #define S626_INVERT_BCLK2      0x00100000      /* Invert BCLK2 (DACs). */
 #define S626_BCLK2_OE          0x00040000      /* Enable BCLK2 (DACs). */
-#define S626_ACON2_XORMASK     0x000C0000      /* XOR mask for ACON2
-                                                * active-low bits. */
+#define S626_ACON2_XORMASK     0x000C0000      /*
+                                                * XOR mask for ACON2
+                                                * active-low bits.
+                                                */
 
 #define S626_ACON2_INIT                (S626_ACON2_XORMASK ^ \
                                 (S626_A1_CLKSRC_BCLK1 | S626_A2_CLKSRC_X2 | \
 #define S626_WS3               0x10000000
 #define S626_WS4               0x08000000
 #define S626_RSD1              0x01000000      /* Shift A1 data in on SD1. */
-#define S626_SDW_A1            0x00800000      /* Store rcv'd char at next char
-                                                * slot of DWORD1 buffer. */
-#define S626_SIB_A1            0x00400000      /* Store rcv'd char at next
-                                                * char slot of FB1 buffer. */
-#define S626_SF_A1             0x00200000      /* Write unsigned long
-                                                * buffer to input FIFO. */
+#define S626_SDW_A1            0x00800000      /*
+                                                * Store rcv'd char at next char
+                                                * slot of DWORD1 buffer.
+                                                */
+#define S626_SIB_A1            0x00400000      /*
+                                                * Store rcv'd char at next
+                                                * char slot of FB1 buffer.
+                                                */
+#define S626_SF_A1             0x00200000      /*
+                                                * Write unsigned long
+                                                * buffer to input FIFO.
+                                                */
 
 /* Select parallel-to-serial converter's data source: */
 #define S626_XFIFO_0           0x00000000      /* Data fifo byte 0. */
 #define S626_XFB1              0x00000050      /* FB_BUFFER byte 1. */
 #define S626_XFB2              0x00000060      /* FB_BUFFER byte 2. */
 #define S626_XFB3              0x00000070      /* FB_BUFFER byte 3. */
-#define S626_SIB_A2            0x00000200      /* Store next dword from A2's
+#define S626_SIB_A2            0x00000200      /*
+                                                * Store next dword from A2's
                                                 * input shifter to FB2
-                                                * buffer. */
-#define S626_SF_A2             0x00000100      /* Store next dword from A2's
+                                                * buffer.
+                                                */
+#define S626_SF_A2             0x00000100      /*
+                                                * Store next dword from A2's
                                                 * input shifter to its input
-                                                * fifo. */
-#define S626_LF_A2             0x00000080      /* Load next dword from A2's
+                                                * fifo.
+                                                */
+#define S626_LF_A2             0x00000080      /*
+                                                * Load next dword from A2's
                                                 * output fifo into its
-                                                * output dword buffer. */
+                                                * output dword buffer.
+                                                */
 #define S626_XSD2              0x00000008      /* Shift data out on SD2. */
 #define S626_RSD3              0x00001800      /* Shift data in on SD3. */
 #define S626_RSD2              0x00001000      /* Shift data in on SD2. */
-#define S626_LOW_A2            0x00000002      /* Drive last SD low for 7 clks,
-                                                * then tri-state. */
+#define S626_LOW_A2            0x00000002      /*
+                                                * Drive last SD low for 7 clks,
+                                                * then tri-state.
+                                                */
 #define S626_EOS               0x00000001      /* End of superframe. */
 
 /* I2C configuration constants. */
-#define S626_I2C_CLKSEL                0x0400          /* I2C bit rate =
-                                                * PCIclk/480 = 68.75 KHz. */
-#define S626_I2C_BITRATE       68.75           /* I2C bus data bit rate
+#define S626_I2C_CLKSEL                0x0400          /*
+                                                * I2C bit rate =
+                                                * PCIclk/480 = 68.75 KHz.
+                                                */
+#define S626_I2C_BITRATE       68.75           /*
+                                                * I2C bus data bit rate
                                                 * (determined by
-                                                * S626_I2C_CLKSEL) in KHz. */
-#define S626_I2C_WRTIME                15.0            /* Worst case time, in msec,
+                                                * S626_I2C_CLKSEL) in KHz.
+                                                */
+#define S626_I2C_WRTIME                15.0            /*
+                                                * Worst case time, in msec,
                                                 * for EEPROM internal write
-                                                * op. */
+                                                * op.
+                                                */
 
 /* I2C manifest constants. */
 
 #define S626_I2C_B0(ATTR, VAL) (((ATTR) << 2) | ((VAL) <<  8))
 
 /* DEBI command constants. */
-#define S626_DEBI_CMD_SIZE16   (2 << 17)       /* Transfer size is always
-                                                * 2 bytes. */
+#define S626_DEBI_CMD_SIZE16   (2 << 17)       /*
+                                                * Transfer size is always
+                                                * 2 bytes.
+                                                */
 #define S626_DEBI_CMD_READ     0x00010000      /* Read operation. */
 #define S626_DEBI_CMD_WRITE    0x00000000      /* Write operation. */
 
 #define S626_DEBI_CMD_WRWORD   (S626_DEBI_CMD_WRITE | S626_DEBI_CMD_SIZE16)
 
 /* DEBI configuration constants. */
-#define S626_DEBI_CFG_XIRQ_EN  0x80000000      /* Enable external interrupt
-                                                * on GPIO3. */
+#define S626_DEBI_CFG_XIRQ_EN  0x80000000      /*
+                                                * Enable external interrupt
+                                                * on GPIO3.
+                                                */
 #define S626_DEBI_CFG_XRESUME  0x40000000      /* Resume block */
-                                               /* Transfer when XIRQ
-                                                * deasserted. */
+                                               /*
+                                                * Transfer when XIRQ
+                                                * deasserted.
+                                                */
 #define S626_DEBI_CFG_TOQ      0x03C00000      /* Timeout (15 PCI cycles). */
 #define S626_DEBI_CFG_FAST     0x10000000      /* Fast mode enable. */
 
 /* 4-bit field that specifies DEBI timeout value in PCI clock cycles: */
-#define S626_DEBI_CFG_TOUT_BIT 22      /* Finish DEBI cycle after this many
-                                        * clocks. */
+#define S626_DEBI_CFG_TOUT_BIT 22      /*
+                                        * Finish DEBI cycle after this many
+                                        * clocks.
+                                        */
 
 /* 2-bit field that specifies Endian byte lane steering: */
-#define S626_DEBI_CFG_SWAP_NONE        0x00000000      /* Straight - don't swap any
-                                                * bytes (Intel). */
+#define S626_DEBI_CFG_SWAP_NONE        0x00000000      /*
+                                                * Straight - don't swap any
+                                                * bytes (Intel).
+                                                */
 #define S626_DEBI_CFG_SWAP_2   0x00100000      /* 2-byte swap (Motorola). */
 #define S626_DEBI_CFG_SWAP_4   0x00200000      /* 4-byte swap. */
-#define S626_DEBI_CFG_SLAVE16  0x00080000      /* Slave is able to serve
-                                                * 16-bit cycles. */
-#define S626_DEBI_CFG_INC      0x00040000      /* Enable address increment
-                                                * for block transfers. */
+#define S626_DEBI_CFG_SLAVE16  0x00080000      /*
+                                                * Slave is able to serve
+                                                * 16-bit cycles.
+                                                */
+#define S626_DEBI_CFG_INC      0x00040000      /*
+                                                * Enable address increment
+                                                * for block transfers.
+                                                */
 #define S626_DEBI_CFG_INTEL    0x00020000      /* Intel style local bus. */
 #define S626_DEBI_CFG_TIMEROFF 0x00010000      /* Disable timer. */
 
 #if S626_PLATFORM == S626_INTEL
 
-#define S626_DEBI_TOUT         7       /* Wait 7 PCI clocks (212 ns) before
-                                        * polling RDY. */
+#define S626_DEBI_TOUT         7       /*
+                                        * Wait 7 PCI clocks (212 ns) before
+                                        * polling RDY.
+                                        */
 
 /* Intel byte lane steering (pass through all byte lanes). */
 #define S626_DEBI_SWAP         S626_DEBI_CFG_SWAP_NONE
 
 #elif S626_PLATFORM == S626_MOTOROLA
 
-#define S626_DEBI_TOUT         15      /* Wait 15 PCI clocks (454 ns) maximum
-                                        * before timing out. */
+#define S626_DEBI_TOUT         15      /*
+                                        * Wait 15 PCI clocks (454 ns) maximum
+                                        * before timing out.
+                                        */
 
 /* Motorola byte lane steering. */
 #define S626_DEBI_SWAP         S626_DEBI_CFG_SWAP_2
 
 /* LoadSrc values: */
 #define S626_LOADSRC_INDX      0       /* Preload core in response to Index. */
-#define S626_LOADSRC_OVER      1       /* Preload core in response to
-                                        * Overflow. */
-#define S626_LOADSRCB_OVERA    2       /* Preload B core in response to
-                                        * A Overflow. */
+#define S626_LOADSRC_OVER      1       /*
+                                        * Preload core in response to
+                                        * Overflow.
+                                        */
+#define S626_LOADSRCB_OVERA    2       /*
+                                        * Preload B core in response to
+                                        * A Overflow.
+                                        */
 #define S626_LOADSRC_NONE      3       /* Never preload core. */
 
 /* IntSrc values: */
 #define S626_CNTSRC_SYSCLK_DOWN        3       /* System clock down */
 
 /* ClkPol values: */
-#define S626_CLKPOL_POS                0       /* Counter/Extender clock is
-                                        * active high. */
-#define S626_CLKPOL_NEG                1       /* Counter/Extender clock is
-                                        * active low. */
+#define S626_CLKPOL_POS                0       /*
+                                        * Counter/Extender clock is
+                                        * active high.
+                                        */
+#define S626_CLKPOL_NEG                1       /*
+                                        * Counter/Extender clock is
+                                        * active low.
+                                        */
 #define S626_CNTDIR_UP         0       /* Timer counts up. */
 #define S626_CNTDIR_DOWN       1       /* Timer counts down. */
 
 
 /* Sanity-check limits for parameters. */
 
-#define S626_NUM_COUNTERS      6       /* Maximum valid counter
-                                        * logical channel number. */
+#define S626_NUM_COUNTERS      6       /*
+                                        * Maximum valid counter
+                                        * logical channel number.
+                                        */
 #define S626_NUM_INTSOURCES    4
 #define S626_NUM_LATCHSOURCES  4
 #define S626_NUM_CLKMULTS      4
index 7a1defcf210233e7c2d7a03ca290caddb5cc96ed..0d33e520f6350f599d8e2d3b7a6a4a2e561ce960 100644 (file)
@@ -95,7 +95,7 @@ struct serial_data {
 #define S2002_CFG_SIGN(x)              (((x) >> 13) & 0x1)
 #define S2002_CFG_BASE(x)              (((x) >> 14) & 0xfffff)
 
-static long serial2002_tty_ioctl(struct file *f, unsigned op,
+static long serial2002_tty_ioctl(struct file *f, unsigned int op,
                                 unsigned long param)
 {
        if (f->f_op->unlocked_ioctl)
@@ -379,7 +379,10 @@ static int serial2002_setup_subdevice(struct comedi_subdevice *s,
                                range_table_list[chan] =
                                    (const struct comedi_lrange *)&range[j];
                        }
-                       maxdata_list[chan] = ((long long)1 << cfg[j].bits) - 1;
+                       if (cfg[j].bits < 32)
+                               maxdata_list[chan] = (1u << cfg[j].bits) - 1;
+                       else
+                               maxdata_list[chan] = 0xffffffff;
                        chan++;
                }
        }
index c31fe1bca19119d8b77a4df0ebbbeaa76c0d9ca3..fe271fbd629bd6189bc3854fce3e54ad98594903 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -57,12 +57,14 @@ int dpbp_open(struct fsl_mc_io *mc_io,
              u16 *token)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_cmd_open *cmd_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN,
                                          cmd_flags, 0);
-       cmd.params[0] |= mc_enc(0, 32, dpbp_id);
+       cmd_params = (struct dpbp_cmd_open *)cmd.params;
+       cmd_params->dpbp_id = cpu_to_le32(dpbp_id);
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -70,7 +72,7 @@ int dpbp_open(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+       *token = mc_cmd_hdr_read_token(&cmd);
 
        return err;
 }
@@ -143,7 +145,7 @@ int dpbp_create(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+       *token = mc_cmd_hdr_read_token(&cmd);
 
        return 0;
 }
@@ -231,6 +233,7 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
                    int *en)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_rsp_is_enabled *rsp_params;
        int err;
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags,
@@ -242,7 +245,8 @@ int dpbp_is_enabled(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *en = (int)mc_dec(cmd.params[0], 0, 1);
+       rsp_params = (struct dpbp_rsp_is_enabled *)cmd.params;
+       *en = rsp_params->enabled & DPBP_ENABLE;
 
        return 0;
 }
@@ -286,14 +290,16 @@ int dpbp_set_irq(struct fsl_mc_io *mc_io,
                 struct dpbp_irq_cfg *irq_cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_cmd_set_irq *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 8, irq_index);
-       cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
-       cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr);
-       cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+       cmd_params = (struct dpbp_cmd_set_irq *)cmd.params;
+       cmd_params->irq_index = irq_index;
+       cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+       cmd_params->irq_addr = cpu_to_le64(irq_cfg->addr);
+       cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -319,12 +325,15 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io,
                 struct dpbp_irq_cfg *irq_cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_cmd_get_irq *cmd_params;
+       struct dpbp_rsp_get_irq *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpbp_cmd_get_irq *)cmd.params;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -332,10 +341,12 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
-       irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64);
-       irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
-       *type = (int)mc_dec(cmd.params[2], 32, 32);
+       rsp_params = (struct dpbp_rsp_get_irq *)cmd.params;
+       irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+       irq_cfg->addr = le64_to_cpu(rsp_params->irq_addr);
+       irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+       *type = le32_to_cpu(rsp_params->type);
+
        return 0;
 }
 
@@ -361,12 +372,14 @@ int dpbp_set_irq_enable(struct fsl_mc_io *mc_io,
                        u8 en)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_cmd_set_irq_enable *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 8, en);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpbp_cmd_set_irq_enable *)cmd.params;
+       cmd_params->enable = en & DPBP_ENABLE;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -389,12 +402,15 @@ int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
                        u8 *en)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_cmd_get_irq_enable *cmd_params;
+       struct dpbp_rsp_get_irq_enable *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpbp_cmd_get_irq_enable *)cmd.params;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -402,7 +418,8 @@ int dpbp_get_irq_enable(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *en = (u8)mc_dec(cmd.params[0], 0, 8);
+       rsp_params = (struct dpbp_rsp_get_irq_enable *)cmd.params;
+       *en = rsp_params->enabled & DPBP_ENABLE;
        return 0;
 }
 
@@ -429,12 +446,14 @@ int dpbp_set_irq_mask(struct fsl_mc_io *mc_io,
                      u32 mask)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_cmd_set_irq_mask *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, mask);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpbp_cmd_set_irq_mask *)cmd.params;
+       cmd_params->mask = cpu_to_le32(mask);
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -460,12 +479,15 @@ int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
                      u32 *mask)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_cmd_get_irq_mask *cmd_params;
+       struct dpbp_rsp_get_irq_mask *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpbp_cmd_get_irq_mask *)cmd.params;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -473,7 +495,9 @@ int dpbp_get_irq_mask(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *mask = (u32)mc_dec(cmd.params[0], 0, 32);
+       rsp_params = (struct dpbp_rsp_get_irq_mask *)cmd.params;
+       *mask = le32_to_cpu(rsp_params->mask);
+
        return 0;
 }
 
@@ -497,13 +521,16 @@ int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
                        u32 *status)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_cmd_get_irq_status *cmd_params;
+       struct dpbp_rsp_get_irq_status *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, *status);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpbp_cmd_get_irq_status *)cmd.params;
+       cmd_params->status = cpu_to_le32(*status);
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -511,7 +538,9 @@ int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *status = (u32)mc_dec(cmd.params[0], 0, 32);
+       rsp_params = (struct dpbp_rsp_get_irq_status *)cmd.params;
+       *status = le32_to_cpu(rsp_params->status);
+
        return 0;
 }
 
@@ -535,12 +564,14 @@ int dpbp_clear_irq_status(struct fsl_mc_io *mc_io,
                          u32 status)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_cmd_clear_irq_status *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, status);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpbp_cmd_clear_irq_status *)cmd.params;
+       cmd_params->status = cpu_to_le32(status);
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -562,6 +593,7 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
                        struct dpbp_attr *attr)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_rsp_get_attributes *rsp_params;
        int err;
 
        /* prepare command */
@@ -574,10 +606,12 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       attr->bpid = (u16)mc_dec(cmd.params[0], 16, 16);
-       attr->id = (int)mc_dec(cmd.params[0], 32, 32);
-       attr->version.major = (u16)mc_dec(cmd.params[1], 0, 16);
-       attr->version.minor = (u16)mc_dec(cmd.params[1], 16, 16);
+       rsp_params = (struct dpbp_rsp_get_attributes *)cmd.params;
+       attr->bpid = le16_to_cpu(rsp_params->bpid);
+       attr->id = le32_to_cpu(rsp_params->id);
+       attr->version.major = le16_to_cpu(rsp_params->version_major);
+       attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
        return 0;
 }
 EXPORT_SYMBOL(dpbp_get_attributes);
@@ -597,19 +631,19 @@ int dpbp_set_notifications(struct fsl_mc_io *mc_io,
                           struct dpbp_notification_cfg *cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_cmd_set_notifications *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
-                                         cmd_flags,
-                                         token);
-
-       cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry);
-       cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit);
-       cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry);
-       cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit);
-       cmd.params[2] |= mc_enc(0, 16, cfg->options);
-       cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx);
-       cmd.params[4] |= mc_enc(0, 64, cfg->message_iova);
+                                         cmd_flags, token);
+       cmd_params = (struct dpbp_cmd_set_notifications *)cmd.params;
+       cmd_params->depletion_entry = cpu_to_le32(cfg->depletion_entry);
+       cmd_params->depletion_exit = cpu_to_le32(cfg->depletion_exit);
+       cmd_params->surplus_entry = cpu_to_le32(cfg->surplus_entry);
+       cmd_params->surplus_exit = cpu_to_le32(cfg->surplus_exit);
+       cmd_params->options = cpu_to_le16(cfg->options);
+       cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+       cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -630,6 +664,7 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io,
                           struct dpbp_notification_cfg *cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dpbp_rsp_get_notifications *rsp_params;
        int err;
 
        /* prepare command */
@@ -643,13 +678,14 @@ int dpbp_get_notifications(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       cfg->depletion_entry = (u32)mc_dec(cmd.params[0], 0, 32);
-       cfg->depletion_exit = (u32)mc_dec(cmd.params[0], 32, 32);
-       cfg->surplus_entry = (u32)mc_dec(cmd.params[1], 0, 32);
-       cfg->surplus_exit = (u32)mc_dec(cmd.params[1], 32, 32);
-       cfg->options = (u16)mc_dec(cmd.params[2], 0, 16);
-       cfg->message_ctx = (u64)mc_dec(cmd.params[3], 0, 64);
-       cfg->message_iova = (u64)mc_dec(cmd.params[4], 0, 64);
+       rsp_params = (struct dpbp_rsp_get_notifications *)cmd.params;
+       cfg->depletion_entry = le32_to_cpu(rsp_params->depletion_entry);
+       cfg->depletion_exit = le32_to_cpu(rsp_params->depletion_exit);
+       cfg->surplus_entry = le32_to_cpu(rsp_params->surplus_entry);
+       cfg->surplus_exit = le32_to_cpu(rsp_params->surplus_exit);
+       cfg->options = le16_to_cpu(rsp_params->options);
+       cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+       cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
 
        return 0;
 }
index c9b52dd7ba31dcc6a0f0d994b241fa91062b6c59..d098a6d8f6bced916d68ae010d67ce25adecd2eb 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
 #define DPMCP_CMDID_GET_IRQ_MASK                       0x015
 #define DPMCP_CMDID_GET_IRQ_STATUS                     0x016
 
+struct dpmcp_cmd_open {
+       __le32 dpmcp_id;
+};
+
+struct dpmcp_cmd_create {
+       __le32 portal_id;
+};
+
+struct dpmcp_cmd_set_irq {
+       /* cmd word 0 */
+       u8 irq_index;
+       u8 pad[3];
+       __le32 irq_val;
+       /* cmd word 1 */
+       __le64 irq_addr;
+       /* cmd word 2 */
+       __le32 irq_num;
+};
+
+struct dpmcp_cmd_get_irq {
+       __le32 pad;
+       u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq {
+       /* cmd word 0 */
+       __le32 irq_val;
+       __le32 pad;
+       /* cmd word 1 */
+       __le64 irq_paddr;
+       /* cmd word 2 */
+       __le32 irq_num;
+       __le32 type;
+};
+
+#define DPMCP_ENABLE           0x1
+
+struct dpmcp_cmd_set_irq_enable {
+       u8 enable;
+       u8 pad[3];
+       u8 irq_index;
+};
+
+struct dpmcp_cmd_get_irq_enable {
+       __le32 pad;
+       u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq_enable {
+       u8 enabled;
+};
+
+struct dpmcp_cmd_set_irq_mask {
+       __le32 mask;
+       u8 irq_index;
+};
+
+struct dpmcp_cmd_get_irq_mask {
+       __le32 pad;
+       u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq_mask {
+       __le32 mask;
+};
+
+struct dpmcp_cmd_get_irq_status {
+       __le32 status;
+       u8 irq_index;
+};
+
+struct dpmcp_rsp_get_irq_status {
+       __le32 status;
+};
+
+struct dpmcp_rsp_get_attributes {
+       /* response word 0 */
+       __le32 pad;
+       __le32 id;
+       /* response word 1 */
+       __le16 version_major;
+       __le16 version_minor;
+};
+
 #endif /* _FSL_DPMCP_CMD_H */
index fd6dd4e07b8724b26a60585ff6092a75ba8818f3..06440176243ac45165cf0635db9622cc98d7e759 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2013-2015 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -57,12 +57,14 @@ int dpmcp_open(struct fsl_mc_io *mc_io,
               u16 *token)
 {
        struct mc_command cmd = { 0 };
+       struct dpmcp_cmd_open *cmd_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN,
                                          cmd_flags, 0);
-       cmd.params[0] |= mc_enc(0, 32, dpmcp_id);
+       cmd_params = (struct dpmcp_cmd_open *)cmd.params;
+       cmd_params->dpmcp_id = cpu_to_le32(dpmcp_id);
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -70,7 +72,7 @@ int dpmcp_open(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+       *token = mc_cmd_hdr_read_token(&cmd);
 
        return err;
 }
@@ -127,12 +129,15 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
                 u16 *token)
 {
        struct mc_command cmd = { 0 };
+       struct dpmcp_cmd_create *cmd_params;
+
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE,
                                          cmd_flags, 0);
-       cmd.params[0] |= mc_enc(0, 32, cfg->portal_id);
+       cmd_params = (struct dpmcp_cmd_create *)cmd.params;
+       cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -140,7 +145,7 @@ int dpmcp_create(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+       *token = mc_cmd_hdr_read_token(&cmd);
 
        return 0;
 }
@@ -206,14 +211,16 @@ int dpmcp_set_irq(struct fsl_mc_io *mc_io,
                  struct dpmcp_irq_cfg  *irq_cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dpmcp_cmd_set_irq *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 8, irq_index);
-       cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
-       cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
-       cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+       cmd_params = (struct dpmcp_cmd_set_irq *)cmd.params;
+       cmd_params->irq_index = irq_index;
+       cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+       cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+       cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -239,12 +246,15 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_io,
                  struct dpmcp_irq_cfg  *irq_cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dpmcp_cmd_get_irq *cmd_params;
+       struct dpmcp_rsp_get_irq *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpmcp_cmd_get_irq *)cmd.params;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -252,10 +262,11 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
-       irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
-       irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
-       *type = (int)mc_dec(cmd.params[2], 32, 32);
+       rsp_params = (struct dpmcp_rsp_get_irq *)cmd.params;
+       irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+       irq_cfg->paddr = le64_to_cpu(rsp_params->irq_paddr);
+       irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+       *type = le32_to_cpu(rsp_params->type);
        return 0;
 }
 
@@ -281,12 +292,14 @@ int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io,
                         u8 en)
 {
        struct mc_command cmd = { 0 };
+       struct dpmcp_cmd_set_irq_enable *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 8, en);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpmcp_cmd_set_irq_enable *)cmd.params;
+       cmd_params->enable = en & DPMCP_ENABLE;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -309,12 +322,15 @@ int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
                         u8 *en)
 {
        struct mc_command cmd = { 0 };
+       struct dpmcp_cmd_get_irq_enable *cmd_params;
+       struct dpmcp_rsp_get_irq_enable *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpmcp_cmd_get_irq_enable *)cmd.params;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -322,7 +338,8 @@ int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *en = (u8)mc_dec(cmd.params[0], 0, 8);
+       rsp_params = (struct dpmcp_rsp_get_irq_enable *)cmd.params;
+       *en = rsp_params->enabled & DPMCP_ENABLE;
        return 0;
 }
 
@@ -349,12 +366,15 @@ int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io,
                       u32 mask)
 {
        struct mc_command cmd = { 0 };
+       struct dpmcp_cmd_set_irq_mask *cmd_params;
+
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, mask);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpmcp_cmd_set_irq_mask *)cmd.params;
+       cmd_params->mask = cpu_to_le32(mask);
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -380,12 +400,16 @@ int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
                       u32 *mask)
 {
        struct mc_command cmd = { 0 };
+       struct dpmcp_cmd_get_irq_mask *cmd_params;
+       struct dpmcp_rsp_get_irq_mask *rsp_params;
+
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpmcp_cmd_get_irq_mask *)cmd.params;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -393,7 +417,9 @@ int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *mask = (u32)mc_dec(cmd.params[0], 0, 32);
+       rsp_params = (struct dpmcp_rsp_get_irq_mask *)cmd.params;
+       *mask = le32_to_cpu(rsp_params->mask);
+
        return 0;
 }
 
@@ -417,12 +443,16 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
                         u32 *status)
 {
        struct mc_command cmd = { 0 };
+       struct dpmcp_cmd_get_irq_status *cmd_params;
+       struct dpmcp_rsp_get_irq_status *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dpmcp_cmd_get_irq_status *)cmd.params;
+       cmd_params->status = cpu_to_le32(*status);
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -430,7 +460,9 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *status = (u32)mc_dec(cmd.params[0], 0, 32);
+       rsp_params = (struct dpmcp_rsp_get_irq_status *)cmd.params;
+       *status = le32_to_cpu(rsp_params->status);
+
        return 0;
 }
 
@@ -450,6 +482,7 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
                         struct dpmcp_attr *attr)
 {
        struct mc_command cmd = { 0 };
+       struct dpmcp_rsp_get_attributes *rsp_params;
        int err;
 
        /* prepare command */
@@ -462,8 +495,10 @@ int dpmcp_get_attributes(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       attr->id = (int)mc_dec(cmd.params[0], 32, 32);
-       attr->version.major = (u16)mc_dec(cmd.params[1], 0, 16);
-       attr->version.minor = (u16)mc_dec(cmd.params[1], 16, 16);
+       rsp_params = (struct dpmcp_rsp_get_attributes *)cmd.params;
+       attr->id = le32_to_cpu(rsp_params->id);
+       attr->version.major = le16_to_cpu(rsp_params->version_major);
+       attr->version.minor = le16_to_cpu(rsp_params->version_minor);
+
        return 0;
 }
index ba8cfa9635dd154dbf38b06c53a678fa22888aa7..779bf9c25bc01ebddefe43c73767267a5188a5a5 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
 #define DPMNG_CMDID_GET_CONT_ID                        0x830
 #define DPMNG_CMDID_GET_VERSION                        0x831
 
+struct dpmng_rsp_get_container_id {
+       __le32 container_id;
+};
+
+struct dpmng_rsp_get_version {
+       __le32 revision;
+       __le32 version_major;
+       __le32 version_minor;
+};
+
 #endif /* __FSL_DPMNG_CMD_H */
index f633fcd86e51d6973ac0b08bffe8809e0e59fd77..660bbe7ea899b0c52572afc5e3bf342b25085d7d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -48,6 +48,7 @@ int mc_get_version(struct fsl_mc_io *mc_io,
                   struct mc_version *mc_ver_info)
 {
        struct mc_command cmd = { 0 };
+       struct dpmng_rsp_get_version *rsp_params;
        int err;
 
        /* prepare command */
@@ -61,12 +62,14 @@ int mc_get_version(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       mc_ver_info->revision = mc_dec(cmd.params[0], 0, 32);
-       mc_ver_info->major = mc_dec(cmd.params[0], 32, 32);
-       mc_ver_info->minor = mc_dec(cmd.params[1], 0, 32);
+       rsp_params = (struct dpmng_rsp_get_version *)cmd.params;
+       mc_ver_info->revision = le32_to_cpu(rsp_params->revision);
+       mc_ver_info->major = le32_to_cpu(rsp_params->version_major);
+       mc_ver_info->minor = le32_to_cpu(rsp_params->version_minor);
 
        return 0;
 }
+EXPORT_SYMBOL(mc_get_version);
 
 /**
  * dpmng_get_container_id() - Get container ID associated with a given portal.
@@ -81,6 +84,7 @@ int dpmng_get_container_id(struct fsl_mc_io *mc_io,
                           int *container_id)
 {
        struct mc_command cmd = { 0 };
+       struct dpmng_rsp_get_container_id *rsp_params;
        int err;
 
        /* prepare command */
@@ -94,7 +98,8 @@ int dpmng_get_container_id(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *container_id = mc_dec(cmd.params[0], 0, 32);
+       rsp_params = (struct dpmng_rsp_get_container_id *)cmd.params;
+       *container_id = le32_to_cpu(rsp_params->container_id);
 
        return 0;
 }
index 9b854fa8e84d6f0665955cb53628a9a9e3a6b5ed..bb127f4a3ae7d0fdd3715aec2c08531d1048c3ec 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
 
 #define DPRC_CMDID_GET_CONNECTION              0x16C
 
+struct dprc_cmd_open {
+       __le32 container_id;
+};
+
+struct dprc_cmd_create_container {
+       /* cmd word 0 */
+       __le32 options;
+       __le16 icid;
+       __le16 pad0;
+       /* cmd word 1 */
+       __le32 pad1;
+       __le32 portal_id;
+       /* cmd words 2-3 */
+       u8 label[16];
+};
+
+struct dprc_rsp_create_container {
+       /* response word 0 */
+       __le64 pad0;
+       /* response word 1 */
+       __le32 child_container_id;
+       __le32 pad1;
+       /* response word 2 */
+       __le64 child_portal_addr;
+};
+
+struct dprc_cmd_destroy_container {
+       __le32 child_container_id;
+};
+
+struct dprc_cmd_reset_container {
+       __le32 child_container_id;
+};
+
+struct dprc_cmd_set_irq {
+       /* cmd word 0 */
+       __le32 irq_val;
+       u8 irq_index;
+       u8 pad[3];
+       /* cmd word 1 */
+       __le64 irq_addr;
+       /* cmd word 2 */
+       __le32 irq_num;
+};
+
+struct dprc_cmd_get_irq {
+       __le32 pad;
+       u8 irq_index;
+};
+
+struct dprc_rsp_get_irq {
+       /* response word 0 */
+       __le32 irq_val;
+       __le32 pad;
+       /* response word 1 */
+       __le64 irq_addr;
+       /* response word 2 */
+       __le32 irq_num;
+       __le32 type;
+};
+
+#define DPRC_ENABLE            0x1
+
+struct dprc_cmd_set_irq_enable {
+       u8 enable;
+       u8 pad[3];
+       u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_enable {
+       __le32 pad;
+       u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_enable {
+       u8 enabled;
+};
+
+struct dprc_cmd_set_irq_mask {
+       __le32 mask;
+       u8 irq_index;
+};
+
+struct dprc_cmd_get_irq_mask {
+       __le32 pad;
+       u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_mask {
+       __le32 mask;
+};
+
+struct dprc_cmd_get_irq_status {
+       __le32 status;
+       u8 irq_index;
+};
+
+struct dprc_rsp_get_irq_status {
+       __le32 status;
+};
+
+struct dprc_cmd_clear_irq_status {
+       __le32 status;
+       u8 irq_index;
+};
+
+struct dprc_rsp_get_attributes {
+       /* response word 0 */
+       __le32 container_id;
+       __le16 icid;
+       __le16 pad;
+       /* response word 1 */
+       __le32 options;
+       __le32 portal_id;
+       /* response word 2 */
+       __le16 version_major;
+       __le16 version_minor;
+};
+
+struct dprc_cmd_set_res_quota {
+       /* cmd word 0 */
+       __le32 child_container_id;
+       __le16 quota;
+       __le16 pad;
+       /* cmd words 1-2 */
+       u8 type[16];
+};
+
+struct dprc_cmd_get_res_quota {
+       /* cmd word 0 */
+       __le32 child_container_id;
+       __le32 pad;
+       /* cmd word 1-2 */
+       u8 type[16];
+};
+
+struct dprc_rsp_get_res_quota {
+       __le32 pad;
+       __le16 quota;
+};
+
+struct dprc_cmd_assign {
+       /* cmd word 0 */
+       __le32 container_id;
+       __le32 options;
+       /* cmd word 1 */
+       __le32 num;
+       __le32 id_base_align;
+       /* cmd word 2-3 */
+       u8 type[16];
+};
+
+struct dprc_cmd_unassign {
+       /* cmd word 0 */
+       __le32 child_container_id;
+       __le32 options;
+       /* cmd word 1 */
+       __le32 num;
+       __le32 id_base_align;
+       /* cmd word 2-3 */
+       u8 type[16];
+};
+
+struct dprc_rsp_get_pool_count {
+       __le32 pool_count;
+};
+
+struct dprc_cmd_get_pool {
+       __le32 pool_index;
+};
+
+struct dprc_rsp_get_pool {
+       /* response word 0 */
+       __le64 pad;
+       /* response word 1-2 */
+       u8 type[16];
+};
+
+struct dprc_rsp_get_obj_count {
+       __le32 pad;
+       __le32 obj_count;
+};
+
+struct dprc_cmd_get_obj {
+       __le32 obj_index;
+};
+
+struct dprc_rsp_get_obj {
+       /* response word 0 */
+       __le32 pad0;
+       __le32 id;
+       /* response word 1 */
+       __le16 vendor;
+       u8 irq_count;
+       u8 region_count;
+       __le32 state;
+       /* response word 2 */
+       __le16 version_major;
+       __le16 version_minor;
+       __le16 flags;
+       __le16 pad1;
+       /* response word 3-4 */
+       u8 type[16];
+       /* response word 5-6 */
+       u8 label[16];
+};
+
+struct dprc_cmd_get_obj_desc {
+       /* cmd word 0 */
+       __le32 obj_id;
+       __le32 pad;
+       /* cmd word 1-2 */
+       u8 type[16];
+};
+
+struct dprc_rsp_get_obj_desc {
+       /* response word 0 */
+       __le32 pad0;
+       __le32 id;
+       /* response word 1 */
+       __le16 vendor;
+       u8 irq_count;
+       u8 region_count;
+       __le32 state;
+       /* response word 2 */
+       __le16 version_major;
+       __le16 version_minor;
+       __le16 flags;
+       __le16 pad1;
+       /* response word 3-4 */
+       u8 type[16];
+       /* response word 5-6 */
+       u8 label[16];
+};
+
+struct dprc_cmd_get_res_count {
+       /* cmd word 0 */
+       __le64 pad;
+       /* cmd word 1-2 */
+       u8 type[16];
+};
+
+struct dprc_rsp_get_res_count {
+       __le32 res_count;
+};
+
+struct dprc_cmd_get_res_ids {
+       /* cmd word 0 */
+       u8 pad0[5];
+       u8 iter_status;
+       __le16 pad1;
+       /* cmd word 1 */
+       __le32 base_id;
+       __le32 last_id;
+       /* cmd word 2-3 */
+       u8 type[16];
+};
+
+struct dprc_rsp_get_res_ids {
+       /* response word 0 */
+       u8 pad0[5];
+       u8 iter_status;
+       __le16 pad1;
+       /* response word 1 */
+       __le32 base_id;
+       __le32 last_id;
+};
+
+struct dprc_cmd_get_obj_region {
+       /* cmd word 0 */
+       __le32 obj_id;
+       __le16 pad0;
+       u8 region_index;
+       u8 pad1;
+       /* cmd word 1-2 */
+       __le64 pad2[2];
+       /* cmd word 3-4 */
+       u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_region {
+       /* response word 0 */
+       __le64 pad;
+       /* response word 1 */
+       __le64 base_addr;
+       /* response word 2 */
+       __le32 size;
+};
+
+struct dprc_cmd_set_obj_label {
+       /* cmd word 0 */
+       __le32 obj_id;
+       __le32 pad;
+       /* cmd word 1-2 */
+       u8 label[16];
+       /* cmd word 3-4 */
+       u8 obj_type[16];
+};
+
+struct dprc_cmd_set_obj_irq {
+       /* cmd word 0 */
+       __le32 irq_val;
+       u8 irq_index;
+       u8 pad[3];
+       /* cmd word 1 */
+       __le64 irq_addr;
+       /* cmd word 2 */
+       __le32 irq_num;
+       __le32 obj_id;
+       /* cmd word 3-4 */
+       u8 obj_type[16];
+};
+
+struct dprc_cmd_get_obj_irq {
+       /* cmd word 0 */
+       __le32 obj_id;
+       u8 irq_index;
+       u8 pad[3];
+       /* cmd word 1-2 */
+       u8 obj_type[16];
+};
+
+struct dprc_rsp_get_obj_irq {
+       /* response word 0 */
+       __le32 irq_val;
+       __le32 pad;
+       /* response word 1 */
+       __le64 irq_addr;
+       /* response word 2 */
+       __le32 irq_num;
+       __le32 type;
+};
+
+struct dprc_cmd_connect {
+       /* cmd word 0 */
+       __le32 ep1_id;
+       __le32 ep1_interface_id;
+       /* cmd word 1 */
+       __le32 ep2_id;
+       __le32 ep2_interface_id;
+       /* cmd word 2-3 */
+       u8 ep1_type[16];
+       /* cmd word 4 */
+       __le32 max_rate;
+       __le32 committed_rate;
+       /* cmd word 5-6 */
+       u8 ep2_type[16];
+};
+
+struct dprc_cmd_disconnect {
+       /* cmd word 0 */
+       __le32 id;
+       __le32 interface_id;
+       /* cmd word 1-2 */
+       u8 type[16];
+};
+
+struct dprc_cmd_get_connection {
+       /* cmd word 0 */
+       __le32 ep1_id;
+       __le32 ep1_interface_id;
+       /* cmd word 1-2 */
+       u8 ep1_type[16];
+};
+
+struct dprc_rsp_get_connection {
+       /* response word 0-2 */
+       __le64 pad[3];
+       /* response word 3 */
+       __le32 ep2_id;
+       __le32 ep2_interface_id;
+       /* response word 4-5 */
+       u8 ep2_type[16];
+       /* response word 6 */
+       __le32 state;
+};
+
 #endif /* _FSL_DPRC_CMD_H */
index 7fc47173c1645632f7c1309c2072626dd2469a80..d2a71f14bf72036a5e3d2d55518091b3b66420ee 100644 (file)
@@ -760,7 +760,12 @@ error_cleanup_msi_domain:
  */
 static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
 {
+       struct fsl_mc_device_irq *irq = mc_dev->irqs[0];
+
        (void)disable_dprc_irq(mc_dev);
+
+       devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+
        fsl_mc_free_irqs(mc_dev);
 }
 
@@ -791,21 +796,28 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
                dprc_teardown_irq(mc_dev);
 
        device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove);
+
+       if (dev_get_msi_domain(&mc_dev->dev)) {
+               fsl_mc_cleanup_irq_pool(mc_bus);
+               dev_set_msi_domain(&mc_dev->dev, NULL);
+       }
+
        dprc_cleanup_all_resource_pools(mc_dev);
+
        error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
        if (error < 0)
                dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error);
 
-       if (dev_get_msi_domain(&mc_dev->dev)) {
-               fsl_mc_cleanup_irq_pool(mc_bus);
-               dev_set_msi_domain(&mc_dev->dev, NULL);
+       if (!fsl_mc_is_root_dprc(&mc_dev->dev)) {
+               fsl_destroy_mc_io(mc_dev->mc_io);
+               mc_dev->mc_io = NULL;
        }
 
        dev_info(&mc_dev->dev, "DPRC device unbound from driver");
        return 0;
 }
 
-static const struct fsl_mc_device_match_id match_id_table[] = {
+static const struct fsl_mc_device_id match_id_table[] = {
        {
         .vendor = FSL_MC_VENDOR_FREESCALE,
         .obj_type = "dprc"},
index a2c47377cc4e2d3bccd64510b4e543dbd823e293..c260549813335888c5f8005c5c3d5d3a6cc0ca0c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
@@ -51,12 +51,14 @@ int dprc_open(struct fsl_mc_io *mc_io,
              u16 *token)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_open *cmd_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags,
                                          0);
-       cmd.params[0] |= mc_enc(0, 32, container_id);
+       cmd_params = (struct dprc_cmd_open *)cmd.params;
+       cmd_params->container_id = cpu_to_le32(container_id);
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -64,7 +66,7 @@ int dprc_open(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+       *token = mc_cmd_hdr_read_token(&cmd);
 
        return 0;
 }
@@ -115,28 +117,17 @@ int dprc_create_container(struct fsl_mc_io *mc_io,
                          u64 *child_portal_offset)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_create_container *cmd_params;
+       struct dprc_rsp_create_container *rsp_params;
        int err;
 
        /* prepare command */
-       cmd.params[0] |= mc_enc(32, 16, cfg->icid);
-       cmd.params[0] |= mc_enc(0, 32, cfg->options);
-       cmd.params[1] |= mc_enc(32, 32, cfg->portal_id);
-       cmd.params[2] |= mc_enc(0, 8, cfg->label[0]);
-       cmd.params[2] |= mc_enc(8, 8, cfg->label[1]);
-       cmd.params[2] |= mc_enc(16, 8, cfg->label[2]);
-       cmd.params[2] |= mc_enc(24, 8, cfg->label[3]);
-       cmd.params[2] |= mc_enc(32, 8, cfg->label[4]);
-       cmd.params[2] |= mc_enc(40, 8, cfg->label[5]);
-       cmd.params[2] |= mc_enc(48, 8, cfg->label[6]);
-       cmd.params[2] |= mc_enc(56, 8, cfg->label[7]);
-       cmd.params[3] |= mc_enc(0, 8, cfg->label[8]);
-       cmd.params[3] |= mc_enc(8, 8, cfg->label[9]);
-       cmd.params[3] |= mc_enc(16, 8, cfg->label[10]);
-       cmd.params[3] |= mc_enc(24, 8, cfg->label[11]);
-       cmd.params[3] |= mc_enc(32, 8, cfg->label[12]);
-       cmd.params[3] |= mc_enc(40, 8, cfg->label[13]);
-       cmd.params[3] |= mc_enc(48, 8, cfg->label[14]);
-       cmd.params[3] |= mc_enc(56, 8, cfg->label[15]);
+       cmd_params = (struct dprc_cmd_create_container *)cmd.params;
+       cmd_params->options = cpu_to_le32(cfg->options);
+       cmd_params->icid = cpu_to_le16(cfg->icid);
+       cmd_params->portal_id = cpu_to_le32(cfg->portal_id);
+       strncpy(cmd_params->label, cfg->label, 16);
+       cmd_params->label[15] = '\0';
 
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT,
                                          cmd_flags, token);
@@ -147,8 +138,9 @@ int dprc_create_container(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *child_container_id = mc_dec(cmd.params[1], 0, 32);
-       *child_portal_offset = mc_dec(cmd.params[2], 0, 64);
+       rsp_params = (struct dprc_rsp_create_container *)cmd.params;
+       *child_container_id = le32_to_cpu(rsp_params->child_container_id);
+       *child_portal_offset = le64_to_cpu(rsp_params->child_portal_addr);
 
        return 0;
 }
@@ -181,11 +173,13 @@ int dprc_destroy_container(struct fsl_mc_io *mc_io,
                           int child_container_id)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_destroy_container *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, child_container_id);
+       cmd_params = (struct dprc_cmd_destroy_container *)cmd.params;
+       cmd_params->child_container_id = cpu_to_le32(child_container_id);
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -219,11 +213,13 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
                         int child_container_id)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_reset_container *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, child_container_id);
+       cmd_params = (struct dprc_cmd_reset_container *)cmd.params;
+       cmd_params->child_container_id = cpu_to_le32(child_container_id);
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -249,13 +245,16 @@ int dprc_get_irq(struct fsl_mc_io *mc_io,
                 struct dprc_irq_cfg *irq_cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_irq *cmd_params;
+       struct dprc_rsp_get_irq *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dprc_cmd_get_irq *)cmd.params;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -263,10 +262,11 @@ int dprc_get_irq(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       irq_cfg->val = mc_dec(cmd.params[0], 0, 32);
-       irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64);
-       irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32);
-       *type = mc_dec(cmd.params[2], 32, 32);
+       rsp_params = (struct dprc_rsp_get_irq *)cmd.params;
+       irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+       irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
+       irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+       *type = le32_to_cpu(rsp_params->type);
 
        return 0;
 }
@@ -288,15 +288,17 @@ int dprc_set_irq(struct fsl_mc_io *mc_io,
                 struct dprc_irq_cfg *irq_cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_set_irq *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
-       cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
-       cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
-       cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
+       cmd_params = (struct dprc_cmd_set_irq *)cmd.params;
+       cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+       cmd_params->irq_index = irq_index;
+       cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+       cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -319,12 +321,15 @@ int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
                        u8 *en)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_irq_enable *cmd_params;
+       struct dprc_rsp_get_irq_enable *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dprc_cmd_get_irq_enable *)cmd.params;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -332,7 +337,8 @@ int dprc_get_irq_enable(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *en = mc_dec(cmd.params[0], 0, 8);
+       rsp_params = (struct dprc_rsp_get_irq_enable *)cmd.params;
+       *en = rsp_params->enabled & DPRC_ENABLE;
 
        return 0;
 }
@@ -359,12 +365,14 @@ int dprc_set_irq_enable(struct fsl_mc_io *mc_io,
                        u8 en)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_set_irq_enable *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 8, en);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dprc_cmd_set_irq_enable *)cmd.params;
+       cmd_params->enable = en & DPRC_ENABLE;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -390,12 +398,15 @@ int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
                      u32 *mask)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_irq_mask *cmd_params;
+       struct dprc_rsp_get_irq_mask *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dprc_cmd_get_irq_mask *)cmd.params;
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -403,7 +414,8 @@ int dprc_get_irq_mask(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *mask = mc_dec(cmd.params[0], 0, 32);
+       rsp_params = (struct dprc_rsp_get_irq_mask *)cmd.params;
+       *mask = le32_to_cpu(rsp_params->mask);
 
        return 0;
 }
@@ -431,12 +443,14 @@ int dprc_set_irq_mask(struct fsl_mc_io *mc_io,
                      u32 mask)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_set_irq_mask *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, mask);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dprc_cmd_set_irq_mask *)cmd.params;
+       cmd_params->mask = cpu_to_le32(mask);
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -461,13 +475,16 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
                        u32 *status)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_irq_status *cmd_params;
+       struct dprc_rsp_get_irq_status *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, *status);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dprc_cmd_get_irq_status *)cmd.params;
+       cmd_params->status = cpu_to_le32(*status);
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -475,7 +492,8 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *status = mc_dec(cmd.params[0], 0, 32);
+       rsp_params = (struct dprc_rsp_get_irq_status *)cmd.params;
+       *status = le32_to_cpu(rsp_params->status);
 
        return 0;
 }
@@ -499,12 +517,14 @@ int dprc_clear_irq_status(struct fsl_mc_io *mc_io,
                          u32 status)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_clear_irq_status *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, status);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
+       cmd_params = (struct dprc_cmd_clear_irq_status *)cmd.params;
+       cmd_params->status = cpu_to_le32(status);
+       cmd_params->irq_index = irq_index;
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -525,6 +545,7 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
                        struct dprc_attributes *attr)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_rsp_get_attributes *rsp_params;
        int err;
 
        /* prepare command */
@@ -538,12 +559,13 @@ int dprc_get_attributes(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       attr->container_id = mc_dec(cmd.params[0], 0, 32);
-       attr->icid = mc_dec(cmd.params[0], 32, 16);
-       attr->options = mc_dec(cmd.params[1], 0, 32);
-       attr->portal_id = mc_dec(cmd.params[1], 32, 32);
-       attr->version.major = mc_dec(cmd.params[2], 0, 16);
-       attr->version.minor = mc_dec(cmd.params[2], 16, 16);
+       rsp_params = (struct dprc_rsp_get_attributes *)cmd.params;
+       attr->container_id = le32_to_cpu(rsp_params->container_id);
+       attr->icid = le16_to_cpu(rsp_params->icid);
+       attr->options = le32_to_cpu(rsp_params->options);
+       attr->portal_id = le32_to_cpu(rsp_params->portal_id);
+       attr->version.major = le16_to_cpu(rsp_params->version_major);
+       attr->version.minor = le16_to_cpu(rsp_params->version_minor);
 
        return 0;
 }
@@ -581,28 +603,16 @@ int dprc_set_res_quota(struct fsl_mc_io *mc_io,
                       u16 quota)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_set_res_quota *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, child_container_id);
-       cmd.params[0] |= mc_enc(32, 16, quota);
-       cmd.params[1] |= mc_enc(0, 8, type[0]);
-       cmd.params[1] |= mc_enc(8, 8, type[1]);
-       cmd.params[1] |= mc_enc(16, 8, type[2]);
-       cmd.params[1] |= mc_enc(24, 8, type[3]);
-       cmd.params[1] |= mc_enc(32, 8, type[4]);
-       cmd.params[1] |= mc_enc(40, 8, type[5]);
-       cmd.params[1] |= mc_enc(48, 8, type[6]);
-       cmd.params[1] |= mc_enc(56, 8, type[7]);
-       cmd.params[2] |= mc_enc(0, 8, type[8]);
-       cmd.params[2] |= mc_enc(8, 8, type[9]);
-       cmd.params[2] |= mc_enc(16, 8, type[10]);
-       cmd.params[2] |= mc_enc(24, 8, type[11]);
-       cmd.params[2] |= mc_enc(32, 8, type[12]);
-       cmd.params[2] |= mc_enc(40, 8, type[13]);
-       cmd.params[2] |= mc_enc(48, 8, type[14]);
-       cmd.params[2] |= mc_enc(56, 8, '\0');
+       cmd_params = (struct dprc_cmd_set_res_quota *)cmd.params;
+       cmd_params->child_container_id = cpu_to_le32(child_container_id);
+       cmd_params->quota = cpu_to_le16(quota);
+       strncpy(cmd_params->type, type, 16);
+       cmd_params->type[15] = '\0';
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -631,28 +641,17 @@ int dprc_get_res_quota(struct fsl_mc_io *mc_io,
                       u16 *quota)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_res_quota *cmd_params;
+       struct dprc_rsp_get_res_quota *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, child_container_id);
-       cmd.params[1] |= mc_enc(0, 8, type[0]);
-       cmd.params[1] |= mc_enc(8, 8, type[1]);
-       cmd.params[1] |= mc_enc(16, 8, type[2]);
-       cmd.params[1] |= mc_enc(24, 8, type[3]);
-       cmd.params[1] |= mc_enc(32, 8, type[4]);
-       cmd.params[1] |= mc_enc(40, 8, type[5]);
-       cmd.params[1] |= mc_enc(48, 8, type[6]);
-       cmd.params[1] |= mc_enc(56, 8, type[7]);
-       cmd.params[2] |= mc_enc(0, 8, type[8]);
-       cmd.params[2] |= mc_enc(8, 8, type[9]);
-       cmd.params[2] |= mc_enc(16, 8, type[10]);
-       cmd.params[2] |= mc_enc(24, 8, type[11]);
-       cmd.params[2] |= mc_enc(32, 8, type[12]);
-       cmd.params[2] |= mc_enc(40, 8, type[13]);
-       cmd.params[2] |= mc_enc(48, 8, type[14]);
-       cmd.params[2] |= mc_enc(56, 8, '\0');
+       cmd_params = (struct dprc_cmd_get_res_quota *)cmd.params;
+       cmd_params->child_container_id = cpu_to_le32(child_container_id);
+       strncpy(cmd_params->type, type, 16);
+       cmd_params->type[15] = '\0';
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -660,7 +659,8 @@ int dprc_get_res_quota(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *quota = mc_dec(cmd.params[0], 32, 16);
+       rsp_params = (struct dprc_rsp_get_res_quota *)cmd.params;
+       *quota = le16_to_cpu(rsp_params->quota);
 
        return 0;
 }
@@ -704,30 +704,18 @@ int dprc_assign(struct fsl_mc_io *mc_io,
                struct dprc_res_req *res_req)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_assign *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, container_id);
-       cmd.params[0] |= mc_enc(32, 32, res_req->options);
-       cmd.params[1] |= mc_enc(0, 32, res_req->num);
-       cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align);
-       cmd.params[2] |= mc_enc(0, 8, res_req->type[0]);
-       cmd.params[2] |= mc_enc(8, 8, res_req->type[1]);
-       cmd.params[2] |= mc_enc(16, 8, res_req->type[2]);
-       cmd.params[2] |= mc_enc(24, 8, res_req->type[3]);
-       cmd.params[2] |= mc_enc(32, 8, res_req->type[4]);
-       cmd.params[2] |= mc_enc(40, 8, res_req->type[5]);
-       cmd.params[2] |= mc_enc(48, 8, res_req->type[6]);
-       cmd.params[2] |= mc_enc(56, 8, res_req->type[7]);
-       cmd.params[3] |= mc_enc(0, 8, res_req->type[8]);
-       cmd.params[3] |= mc_enc(8, 8, res_req->type[9]);
-       cmd.params[3] |= mc_enc(16, 8, res_req->type[10]);
-       cmd.params[3] |= mc_enc(24, 8, res_req->type[11]);
-       cmd.params[3] |= mc_enc(32, 8, res_req->type[12]);
-       cmd.params[3] |= mc_enc(40, 8, res_req->type[13]);
-       cmd.params[3] |= mc_enc(48, 8, res_req->type[14]);
-       cmd.params[3] |= mc_enc(56, 8, res_req->type[15]);
+       cmd_params = (struct dprc_cmd_assign *)cmd.params;
+       cmd_params->container_id = cpu_to_le32(container_id);
+       cmd_params->options = cpu_to_le32(res_req->options);
+       cmd_params->num = cpu_to_le32(res_req->num);
+       cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+       strncpy(cmd_params->type, res_req->type, 16);
+       cmd_params->type[15] = '\0';
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -755,31 +743,19 @@ int dprc_unassign(struct fsl_mc_io *mc_io,
                  struct dprc_res_req *res_req)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_unassign *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(0, 32, child_container_id);
-       cmd.params[0] |= mc_enc(32, 32, res_req->options);
-       cmd.params[1] |= mc_enc(0, 32, res_req->num);
-       cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align);
-       cmd.params[2] |= mc_enc(0, 8, res_req->type[0]);
-       cmd.params[2] |= mc_enc(8, 8, res_req->type[1]);
-       cmd.params[2] |= mc_enc(16, 8, res_req->type[2]);
-       cmd.params[2] |= mc_enc(24, 8, res_req->type[3]);
-       cmd.params[2] |= mc_enc(32, 8, res_req->type[4]);
-       cmd.params[2] |= mc_enc(40, 8, res_req->type[5]);
-       cmd.params[2] |= mc_enc(48, 8, res_req->type[6]);
-       cmd.params[2] |= mc_enc(56, 8, res_req->type[7]);
-       cmd.params[3] |= mc_enc(0, 8, res_req->type[8]);
-       cmd.params[3] |= mc_enc(8, 8, res_req->type[9]);
-       cmd.params[3] |= mc_enc(16, 8, res_req->type[10]);
-       cmd.params[3] |= mc_enc(24, 8, res_req->type[11]);
-       cmd.params[3] |= mc_enc(32, 8, res_req->type[12]);
-       cmd.params[3] |= mc_enc(40, 8, res_req->type[13]);
-       cmd.params[3] |= mc_enc(48, 8, res_req->type[14]);
-       cmd.params[3] |= mc_enc(56, 8, res_req->type[15]);
+       cmd_params = (struct dprc_cmd_unassign *)cmd.params;
+       cmd_params->child_container_id = cpu_to_le32(child_container_id);
+       cmd_params->options = cpu_to_le32(res_req->options);
+       cmd_params->num = cpu_to_le32(res_req->num);
+       cmd_params->id_base_align = cpu_to_le32(res_req->id_base_align);
+       strncpy(cmd_params->type, res_req->type, 16);
+       cmd_params->type[15] = '\0';
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -800,6 +776,7 @@ int dprc_get_pool_count(struct fsl_mc_io *mc_io,
                        int *pool_count)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_rsp_get_pool_count *rsp_params;
        int err;
 
        /* prepare command */
@@ -812,7 +789,8 @@ int dprc_get_pool_count(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *pool_count = mc_dec(cmd.params[0], 0, 32);
+       rsp_params = (struct dprc_rsp_get_pool_count *)cmd.params;
+       *pool_count = le32_to_cpu(rsp_params->pool_count);
 
        return 0;
 }
@@ -839,13 +817,16 @@ int dprc_get_pool(struct fsl_mc_io *mc_io,
                  char *type)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_pool *cmd_params;
+       struct dprc_rsp_get_pool *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(0, 32, pool_index);
+       cmd_params = (struct dprc_cmd_get_pool *)cmd.params;
+       cmd_params->pool_index = cpu_to_le32(pool_index);
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -853,21 +834,8 @@ int dprc_get_pool(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       type[0] = mc_dec(cmd.params[1], 0, 8);
-       type[1] = mc_dec(cmd.params[1], 8, 8);
-       type[2] = mc_dec(cmd.params[1], 16, 8);
-       type[3] = mc_dec(cmd.params[1], 24, 8);
-       type[4] = mc_dec(cmd.params[1], 32, 8);
-       type[5] = mc_dec(cmd.params[1], 40, 8);
-       type[6] = mc_dec(cmd.params[1], 48, 8);
-       type[7] = mc_dec(cmd.params[1], 56, 8);
-       type[8] = mc_dec(cmd.params[2], 0, 8);
-       type[9] = mc_dec(cmd.params[2], 8, 8);
-       type[10] = mc_dec(cmd.params[2], 16, 8);
-       type[11] = mc_dec(cmd.params[2], 24, 8);
-       type[12] = mc_dec(cmd.params[2], 32, 8);
-       type[13] = mc_dec(cmd.params[2], 40, 8);
-       type[14] = mc_dec(cmd.params[2], 48, 8);
+       rsp_params = (struct dprc_rsp_get_pool *)cmd.params;
+       strncpy(type, rsp_params->type, 16);
        type[15] = '\0';
 
        return 0;
@@ -888,6 +856,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
                       int *obj_count)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_rsp_get_obj_count *rsp_params;
        int err;
 
        /* prepare command */
@@ -900,7 +869,8 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *obj_count = mc_dec(cmd.params[0], 32, 32);
+       rsp_params = (struct dprc_rsp_get_obj_count *)cmd.params;
+       *obj_count = le32_to_cpu(rsp_params->obj_count);
 
        return 0;
 }
@@ -928,13 +898,16 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
                 struct dprc_obj_desc *obj_desc)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_obj *cmd_params;
+       struct dprc_rsp_get_obj *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(0, 32, obj_index);
+       cmd_params = (struct dprc_cmd_get_obj *)cmd.params;
+       cmd_params->obj_index = cpu_to_le32(obj_index);
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -942,45 +915,18 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       obj_desc->id = mc_dec(cmd.params[0], 32, 32);
-       obj_desc->vendor = mc_dec(cmd.params[1], 0, 16);
-       obj_desc->irq_count = mc_dec(cmd.params[1], 16, 8);
-       obj_desc->region_count = mc_dec(cmd.params[1], 24, 8);
-       obj_desc->state = mc_dec(cmd.params[1], 32, 32);
-       obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16);
-       obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16);
-       obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
-       obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8);
-       obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8);
-       obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8);
-       obj_desc->type[3] = mc_dec(cmd.params[3], 24, 8);
-       obj_desc->type[4] = mc_dec(cmd.params[3], 32, 8);
-       obj_desc->type[5] = mc_dec(cmd.params[3], 40, 8);
-       obj_desc->type[6] = mc_dec(cmd.params[3], 48, 8);
-       obj_desc->type[7] = mc_dec(cmd.params[3], 56, 8);
-       obj_desc->type[8] = mc_dec(cmd.params[4], 0, 8);
-       obj_desc->type[9] = mc_dec(cmd.params[4], 8, 8);
-       obj_desc->type[10] = mc_dec(cmd.params[4], 16, 8);
-       obj_desc->type[11] = mc_dec(cmd.params[4], 24, 8);
-       obj_desc->type[12] = mc_dec(cmd.params[4], 32, 8);
-       obj_desc->type[13] = mc_dec(cmd.params[4], 40, 8);
-       obj_desc->type[14] = mc_dec(cmd.params[4], 48, 8);
+       rsp_params = (struct dprc_rsp_get_obj *)cmd.params;
+       obj_desc->id = le32_to_cpu(rsp_params->id);
+       obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+       obj_desc->irq_count = rsp_params->irq_count;
+       obj_desc->region_count = rsp_params->region_count;
+       obj_desc->state = le32_to_cpu(rsp_params->state);
+       obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+       obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+       obj_desc->flags = le16_to_cpu(rsp_params->flags);
+       strncpy(obj_desc->type, rsp_params->type, 16);
        obj_desc->type[15] = '\0';
-       obj_desc->label[0] = mc_dec(cmd.params[5], 0, 8);
-       obj_desc->label[1] = mc_dec(cmd.params[5], 8, 8);
-       obj_desc->label[2] = mc_dec(cmd.params[5], 16, 8);
-       obj_desc->label[3] = mc_dec(cmd.params[5], 24, 8);
-       obj_desc->label[4] = mc_dec(cmd.params[5], 32, 8);
-       obj_desc->label[5] = mc_dec(cmd.params[5], 40, 8);
-       obj_desc->label[6] = mc_dec(cmd.params[5], 48, 8);
-       obj_desc->label[7] = mc_dec(cmd.params[5], 56, 8);
-       obj_desc->label[8] = mc_dec(cmd.params[6], 0, 8);
-       obj_desc->label[9] = mc_dec(cmd.params[6], 8, 8);
-       obj_desc->label[10] = mc_dec(cmd.params[6], 16, 8);
-       obj_desc->label[11] = mc_dec(cmd.params[6], 24, 8);
-       obj_desc->label[12] = mc_dec(cmd.params[6], 32, 8);
-       obj_desc->label[13] = mc_dec(cmd.params[6], 40, 8);
-       obj_desc->label[14] = mc_dec(cmd.params[6], 48, 8);
+       strncpy(obj_desc->label, rsp_params->label, 16);
        obj_desc->label[15] = '\0';
        return 0;
 }
@@ -1007,29 +953,18 @@ int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
                      struct dprc_obj_desc *obj_desc)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_obj_desc *cmd_params;
+       struct dprc_rsp_get_obj_desc *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(0, 32, obj_id);
-       cmd.params[1] |= mc_enc(0, 8, obj_type[0]);
-       cmd.params[1] |= mc_enc(8, 8, obj_type[1]);
-       cmd.params[1] |= mc_enc(16, 8, obj_type[2]);
-       cmd.params[1] |= mc_enc(24, 8, obj_type[3]);
-       cmd.params[1] |= mc_enc(32, 8, obj_type[4]);
-       cmd.params[1] |= mc_enc(40, 8, obj_type[5]);
-       cmd.params[1] |= mc_enc(48, 8, obj_type[6]);
-       cmd.params[1] |= mc_enc(56, 8, obj_type[7]);
-       cmd.params[2] |= mc_enc(0, 8, obj_type[8]);
-       cmd.params[2] |= mc_enc(8, 8, obj_type[9]);
-       cmd.params[2] |= mc_enc(16, 8, obj_type[10]);
-       cmd.params[2] |= mc_enc(24, 8, obj_type[11]);
-       cmd.params[2] |= mc_enc(32, 8, obj_type[12]);
-       cmd.params[2] |= mc_enc(40, 8, obj_type[13]);
-       cmd.params[2] |= mc_enc(48, 8, obj_type[14]);
-       cmd.params[2] |= mc_enc(56, 8, obj_type[15]);
+       cmd_params = (struct dprc_cmd_get_obj_desc *)cmd.params;
+       cmd_params->obj_id = cpu_to_le32(obj_id);
+       strncpy(cmd_params->type, obj_type, 16);
+       cmd_params->type[15] = '\0';
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -1037,46 +972,19 @@ int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       obj_desc->id = (int)mc_dec(cmd.params[0], 32, 32);
-       obj_desc->vendor = (u16)mc_dec(cmd.params[1], 0, 16);
-       obj_desc->vendor = (u8)mc_dec(cmd.params[1], 16, 8);
-       obj_desc->region_count = (u8)mc_dec(cmd.params[1], 24, 8);
-       obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32);
-       obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16);
-       obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16);
-       obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
-       obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8);
-       obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8);
-       obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8);
-       obj_desc->type[3] = (char)mc_dec(cmd.params[3], 24, 8);
-       obj_desc->type[4] = (char)mc_dec(cmd.params[3], 32, 8);
-       obj_desc->type[5] = (char)mc_dec(cmd.params[3], 40, 8);
-       obj_desc->type[6] = (char)mc_dec(cmd.params[3], 48, 8);
-       obj_desc->type[7] = (char)mc_dec(cmd.params[3], 56, 8);
-       obj_desc->type[8] = (char)mc_dec(cmd.params[4], 0, 8);
-       obj_desc->type[9] = (char)mc_dec(cmd.params[4], 8, 8);
-       obj_desc->type[10] = (char)mc_dec(cmd.params[4], 16, 8);
-       obj_desc->type[11] = (char)mc_dec(cmd.params[4], 24, 8);
-       obj_desc->type[12] = (char)mc_dec(cmd.params[4], 32, 8);
-       obj_desc->type[13] = (char)mc_dec(cmd.params[4], 40, 8);
-       obj_desc->type[14] = (char)mc_dec(cmd.params[4], 48, 8);
-       obj_desc->type[15] = (char)mc_dec(cmd.params[4], 56, 8);
-       obj_desc->label[0] = (char)mc_dec(cmd.params[5], 0, 8);
-       obj_desc->label[1] = (char)mc_dec(cmd.params[5], 8, 8);
-       obj_desc->label[2] = (char)mc_dec(cmd.params[5], 16, 8);
-       obj_desc->label[3] = (char)mc_dec(cmd.params[5], 24, 8);
-       obj_desc->label[4] = (char)mc_dec(cmd.params[5], 32, 8);
-       obj_desc->label[5] = (char)mc_dec(cmd.params[5], 40, 8);
-       obj_desc->label[6] = (char)mc_dec(cmd.params[5], 48, 8);
-       obj_desc->label[7] = (char)mc_dec(cmd.params[5], 56, 8);
-       obj_desc->label[8] = (char)mc_dec(cmd.params[6], 0, 8);
-       obj_desc->label[9] = (char)mc_dec(cmd.params[6], 8, 8);
-       obj_desc->label[10] = (char)mc_dec(cmd.params[6], 16, 8);
-       obj_desc->label[11] = (char)mc_dec(cmd.params[6], 24, 8);
-       obj_desc->label[12] = (char)mc_dec(cmd.params[6], 32, 8);
-       obj_desc->label[13] = (char)mc_dec(cmd.params[6], 40, 8);
-       obj_desc->label[14] = (char)mc_dec(cmd.params[6], 48, 8);
-       obj_desc->label[15] = (char)mc_dec(cmd.params[6], 56, 8);
+       rsp_params = (struct dprc_rsp_get_obj_desc *)cmd.params;
+       obj_desc->id = le32_to_cpu(rsp_params->id);
+       obj_desc->vendor = le16_to_cpu(rsp_params->vendor);
+       obj_desc->irq_count = rsp_params->irq_count;
+       obj_desc->region_count = rsp_params->region_count;
+       obj_desc->state = le32_to_cpu(rsp_params->state);
+       obj_desc->ver_major = le16_to_cpu(rsp_params->version_major);
+       obj_desc->ver_minor = le16_to_cpu(rsp_params->version_minor);
+       obj_desc->flags = le16_to_cpu(rsp_params->flags);
+       strncpy(obj_desc->type, rsp_params->type, 16);
+       obj_desc->type[15] = '\0';
+       strncpy(obj_desc->label, rsp_params->label, 16);
+       obj_desc->label[15] = '\0';
 
        return 0;
 }
@@ -1103,32 +1011,20 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
                     struct dprc_irq_cfg *irq_cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_set_obj_irq *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
-       cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
-       cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
-       cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
-       cmd.params[2] |= mc_enc(32, 32, obj_id);
-       cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
-       cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
-       cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
-       cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
-       cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
-       cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
-       cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
-       cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
-       cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
-       cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
-       cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
-       cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
-       cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
-       cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
-       cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
-       cmd.params[4] |= mc_enc(56, 8, obj_type[15]);
+       cmd_params = (struct dprc_cmd_set_obj_irq *)cmd.params;
+       cmd_params->irq_val = cpu_to_le32(irq_cfg->val);
+       cmd_params->irq_index = irq_index;
+       cmd_params->irq_addr = cpu_to_le64(irq_cfg->paddr);
+       cmd_params->irq_num = cpu_to_le32(irq_cfg->irq_num);
+       cmd_params->obj_id = cpu_to_le32(obj_id);
+       strncpy(cmd_params->obj_type, obj_type, 16);
+       cmd_params->obj_type[15] = '\0';
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -1159,30 +1055,19 @@ int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
                     struct dprc_irq_cfg *irq_cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_obj_irq *cmd_params;
+       struct dprc_rsp_get_obj_irq *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(0, 32, obj_id);
-       cmd.params[0] |= mc_enc(32, 8, irq_index);
-       cmd.params[1] |= mc_enc(0, 8, obj_type[0]);
-       cmd.params[1] |= mc_enc(8, 8, obj_type[1]);
-       cmd.params[1] |= mc_enc(16, 8, obj_type[2]);
-       cmd.params[1] |= mc_enc(24, 8, obj_type[3]);
-       cmd.params[1] |= mc_enc(32, 8, obj_type[4]);
-       cmd.params[1] |= mc_enc(40, 8, obj_type[5]);
-       cmd.params[1] |= mc_enc(48, 8, obj_type[6]);
-       cmd.params[1] |= mc_enc(56, 8, obj_type[7]);
-       cmd.params[2] |= mc_enc(0, 8, obj_type[8]);
-       cmd.params[2] |= mc_enc(8, 8, obj_type[9]);
-       cmd.params[2] |= mc_enc(16, 8, obj_type[10]);
-       cmd.params[2] |= mc_enc(24, 8, obj_type[11]);
-       cmd.params[2] |= mc_enc(32, 8, obj_type[12]);
-       cmd.params[2] |= mc_enc(40, 8, obj_type[13]);
-       cmd.params[2] |= mc_enc(48, 8, obj_type[14]);
-       cmd.params[2] |= mc_enc(56, 8, obj_type[15]);
+       cmd_params = (struct dprc_cmd_get_obj_irq *)cmd.params;
+       cmd_params->obj_id = cpu_to_le32(obj_id);
+       cmd_params->irq_index = irq_index;
+       strncpy(cmd_params->obj_type, obj_type, 16);
+       cmd_params->obj_type[15] = '\0';
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -1190,10 +1075,11 @@ int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
-       irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
-       irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
-       *type = (int)mc_dec(cmd.params[2], 32, 32);
+       rsp_params = (struct dprc_rsp_get_obj_irq *)cmd.params;
+       irq_cfg->val = le32_to_cpu(rsp_params->irq_val);
+       irq_cfg->paddr = le64_to_cpu(rsp_params->irq_addr);
+       irq_cfg->irq_num = le32_to_cpu(rsp_params->irq_num);
+       *type = le32_to_cpu(rsp_params->type);
 
        return 0;
 }
@@ -1218,29 +1104,16 @@ int dprc_get_res_count(struct fsl_mc_io *mc_io,
                       int *res_count)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_res_count *cmd_params;
+       struct dprc_rsp_get_res_count *rsp_params;
        int err;
 
-       *res_count = 0;
-
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT,
                                          cmd_flags, token);
-       cmd.params[1] |= mc_enc(0, 8, type[0]);
-       cmd.params[1] |= mc_enc(8, 8, type[1]);
-       cmd.params[1] |= mc_enc(16, 8, type[2]);
-       cmd.params[1] |= mc_enc(24, 8, type[3]);
-       cmd.params[1] |= mc_enc(32, 8, type[4]);
-       cmd.params[1] |= mc_enc(40, 8, type[5]);
-       cmd.params[1] |= mc_enc(48, 8, type[6]);
-       cmd.params[1] |= mc_enc(56, 8, type[7]);
-       cmd.params[2] |= mc_enc(0, 8, type[8]);
-       cmd.params[2] |= mc_enc(8, 8, type[9]);
-       cmd.params[2] |= mc_enc(16, 8, type[10]);
-       cmd.params[2] |= mc_enc(24, 8, type[11]);
-       cmd.params[2] |= mc_enc(32, 8, type[12]);
-       cmd.params[2] |= mc_enc(40, 8, type[13]);
-       cmd.params[2] |= mc_enc(48, 8, type[14]);
-       cmd.params[2] |= mc_enc(56, 8, '\0');
+       cmd_params = (struct dprc_cmd_get_res_count *)cmd.params;
+       strncpy(cmd_params->type, type, 16);
+       cmd_params->type[15] = '\0';
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -1248,7 +1121,8 @@ int dprc_get_res_count(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       *res_count = mc_dec(cmd.params[0], 0, 32);
+       rsp_params = (struct dprc_rsp_get_res_count *)cmd.params;
+       *res_count = le32_to_cpu(rsp_params->res_count);
 
        return 0;
 }
@@ -1271,30 +1145,19 @@ int dprc_get_res_ids(struct fsl_mc_io *mc_io,
                     struct dprc_res_ids_range_desc *range_desc)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_res_ids *cmd_params;
+       struct dprc_rsp_get_res_ids *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(42, 7, range_desc->iter_status);
-       cmd.params[1] |= mc_enc(0, 32, range_desc->base_id);
-       cmd.params[1] |= mc_enc(32, 32, range_desc->last_id);
-       cmd.params[2] |= mc_enc(0, 8, type[0]);
-       cmd.params[2] |= mc_enc(8, 8, type[1]);
-       cmd.params[2] |= mc_enc(16, 8, type[2]);
-       cmd.params[2] |= mc_enc(24, 8, type[3]);
-       cmd.params[2] |= mc_enc(32, 8, type[4]);
-       cmd.params[2] |= mc_enc(40, 8, type[5]);
-       cmd.params[2] |= mc_enc(48, 8, type[6]);
-       cmd.params[2] |= mc_enc(56, 8, type[7]);
-       cmd.params[3] |= mc_enc(0, 8, type[8]);
-       cmd.params[3] |= mc_enc(8, 8, type[9]);
-       cmd.params[3] |= mc_enc(16, 8, type[10]);
-       cmd.params[3] |= mc_enc(24, 8, type[11]);
-       cmd.params[3] |= mc_enc(32, 8, type[12]);
-       cmd.params[3] |= mc_enc(40, 8, type[13]);
-       cmd.params[3] |= mc_enc(48, 8, type[14]);
-       cmd.params[3] |= mc_enc(56, 8, '\0');
+       cmd_params = (struct dprc_cmd_get_res_ids *)cmd.params;
+       cmd_params->iter_status = range_desc->iter_status;
+       cmd_params->base_id = cpu_to_le32(range_desc->base_id);
+       cmd_params->last_id = cpu_to_le32(range_desc->last_id);
+       strncpy(cmd_params->type, type, 16);
+       cmd_params->type[15] = '\0';
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -1302,9 +1165,10 @@ int dprc_get_res_ids(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       range_desc->iter_status = mc_dec(cmd.params[0], 42, 7);
-       range_desc->base_id = mc_dec(cmd.params[1], 0, 32);
-       range_desc->last_id = mc_dec(cmd.params[1], 32, 32);
+       rsp_params = (struct dprc_rsp_get_res_ids *)cmd.params;
+       range_desc->iter_status = rsp_params->iter_status;
+       range_desc->base_id = le32_to_cpu(rsp_params->base_id);
+       range_desc->last_id = le32_to_cpu(rsp_params->last_id);
 
        return 0;
 }
@@ -1331,29 +1195,18 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
                        struct dprc_region_desc *region_desc)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_obj_region *cmd_params;
+       struct dprc_rsp_get_obj_region *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
                                          cmd_flags, token);
-       cmd.params[0] |= mc_enc(0, 32, obj_id);
-       cmd.params[0] |= mc_enc(48, 8, region_index);
-       cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
-       cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
-       cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
-       cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
-       cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
-       cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
-       cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
-       cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
-       cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
-       cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
-       cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
-       cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
-       cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
-       cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
-       cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
-       cmd.params[4] |= mc_enc(56, 8, '\0');
+       cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
+       cmd_params->obj_id = cpu_to_le32(obj_id);
+       cmd_params->region_index = region_index;
+       strncpy(cmd_params->obj_type, obj_type, 16);
+       cmd_params->obj_type[15] = '\0';
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -1361,8 +1214,9 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       region_desc->base_offset = mc_dec(cmd.params[1], 0, 64);
-       region_desc->size = mc_dec(cmd.params[2], 0, 32);
+       rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
+       region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
+       region_desc->size = le32_to_cpu(rsp_params->size);
 
        return 0;
 }
@@ -1387,45 +1241,18 @@ int dprc_set_obj_label(struct fsl_mc_io *mc_io,
                       char *label)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_set_obj_label *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL,
                                          cmd_flags,
                                          token);
-
-       cmd.params[0] |= mc_enc(0, 32, obj_id);
-       cmd.params[1] |= mc_enc(0, 8, label[0]);
-       cmd.params[1] |= mc_enc(8, 8, label[1]);
-       cmd.params[1] |= mc_enc(16, 8, label[2]);
-       cmd.params[1] |= mc_enc(24, 8, label[3]);
-       cmd.params[1] |= mc_enc(32, 8, label[4]);
-       cmd.params[1] |= mc_enc(40, 8, label[5]);
-       cmd.params[1] |= mc_enc(48, 8, label[6]);
-       cmd.params[1] |= mc_enc(56, 8, label[7]);
-       cmd.params[2] |= mc_enc(0, 8, label[8]);
-       cmd.params[2] |= mc_enc(8, 8, label[9]);
-       cmd.params[2] |= mc_enc(16, 8, label[10]);
-       cmd.params[2] |= mc_enc(24, 8, label[11]);
-       cmd.params[2] |= mc_enc(32, 8, label[12]);
-       cmd.params[2] |= mc_enc(40, 8, label[13]);
-       cmd.params[2] |= mc_enc(48, 8, label[14]);
-       cmd.params[2] |= mc_enc(56, 8, label[15]);
-       cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
-       cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
-       cmd.params[3] |= mc_enc(16, 8, obj_type[2]);
-       cmd.params[3] |= mc_enc(24, 8, obj_type[3]);
-       cmd.params[3] |= mc_enc(32, 8, obj_type[4]);
-       cmd.params[3] |= mc_enc(40, 8, obj_type[5]);
-       cmd.params[3] |= mc_enc(48, 8, obj_type[6]);
-       cmd.params[3] |= mc_enc(56, 8, obj_type[7]);
-       cmd.params[4] |= mc_enc(0, 8, obj_type[8]);
-       cmd.params[4] |= mc_enc(8, 8, obj_type[9]);
-       cmd.params[4] |= mc_enc(16, 8, obj_type[10]);
-       cmd.params[4] |= mc_enc(24, 8, obj_type[11]);
-       cmd.params[4] |= mc_enc(32, 8, obj_type[12]);
-       cmd.params[4] |= mc_enc(40, 8, obj_type[13]);
-       cmd.params[4] |= mc_enc(48, 8, obj_type[14]);
-       cmd.params[4] |= mc_enc(56, 8, obj_type[15]);
+       cmd_params = (struct dprc_cmd_set_obj_label *)cmd.params;
+       cmd_params->obj_id = cpu_to_le32(obj_id);
+       strncpy(cmd_params->label, label, 16);
+       cmd_params->label[15] = '\0';
+       strncpy(cmd_params->obj_type, obj_type, 16);
+       cmd_params->obj_type[15] = '\0';
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -1453,49 +1280,23 @@ int dprc_connect(struct fsl_mc_io *mc_io,
                 const struct dprc_connection_cfg *cfg)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_connect *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(0, 32, endpoint1->id);
-       cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id);
-       cmd.params[1] |= mc_enc(0, 32, endpoint2->id);
-       cmd.params[1] |= mc_enc(32, 32, endpoint2->if_id);
-       cmd.params[2] |= mc_enc(0, 8, endpoint1->type[0]);
-       cmd.params[2] |= mc_enc(8, 8, endpoint1->type[1]);
-       cmd.params[2] |= mc_enc(16, 8, endpoint1->type[2]);
-       cmd.params[2] |= mc_enc(24, 8, endpoint1->type[3]);
-       cmd.params[2] |= mc_enc(32, 8, endpoint1->type[4]);
-       cmd.params[2] |= mc_enc(40, 8, endpoint1->type[5]);
-       cmd.params[2] |= mc_enc(48, 8, endpoint1->type[6]);
-       cmd.params[2] |= mc_enc(56, 8, endpoint1->type[7]);
-       cmd.params[3] |= mc_enc(0, 8, endpoint1->type[8]);
-       cmd.params[3] |= mc_enc(8, 8, endpoint1->type[9]);
-       cmd.params[3] |= mc_enc(16, 8, endpoint1->type[10]);
-       cmd.params[3] |= mc_enc(24, 8, endpoint1->type[11]);
-       cmd.params[3] |= mc_enc(32, 8, endpoint1->type[12]);
-       cmd.params[3] |= mc_enc(40, 8, endpoint1->type[13]);
-       cmd.params[3] |= mc_enc(48, 8, endpoint1->type[14]);
-       cmd.params[3] |= mc_enc(56, 8, endpoint1->type[15]);
-       cmd.params[4] |= mc_enc(0, 32, cfg->max_rate);
-       cmd.params[4] |= mc_enc(32, 32, cfg->committed_rate);
-       cmd.params[5] |= mc_enc(0, 8, endpoint2->type[0]);
-       cmd.params[5] |= mc_enc(8, 8, endpoint2->type[1]);
-       cmd.params[5] |= mc_enc(16, 8, endpoint2->type[2]);
-       cmd.params[5] |= mc_enc(24, 8, endpoint2->type[3]);
-       cmd.params[5] |= mc_enc(32, 8, endpoint2->type[4]);
-       cmd.params[5] |= mc_enc(40, 8, endpoint2->type[5]);
-       cmd.params[5] |= mc_enc(48, 8, endpoint2->type[6]);
-       cmd.params[5] |= mc_enc(56, 8, endpoint2->type[7]);
-       cmd.params[6] |= mc_enc(0, 8, endpoint2->type[8]);
-       cmd.params[6] |= mc_enc(8, 8, endpoint2->type[9]);
-       cmd.params[6] |= mc_enc(16, 8, endpoint2->type[10]);
-       cmd.params[6] |= mc_enc(24, 8, endpoint2->type[11]);
-       cmd.params[6] |= mc_enc(32, 8, endpoint2->type[12]);
-       cmd.params[6] |= mc_enc(40, 8, endpoint2->type[13]);
-       cmd.params[6] |= mc_enc(48, 8, endpoint2->type[14]);
-       cmd.params[6] |= mc_enc(56, 8, endpoint2->type[15]);
+       cmd_params = (struct dprc_cmd_connect *)cmd.params;
+       cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+       cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+       cmd_params->ep2_id = cpu_to_le32(endpoint2->id);
+       cmd_params->ep2_interface_id = cpu_to_le32(endpoint2->if_id);
+       strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+       cmd_params->ep1_type[15] = '\0';
+       cmd_params->max_rate = cpu_to_le32(cfg->max_rate);
+       cmd_params->committed_rate = cpu_to_le32(cfg->committed_rate);
+       strncpy(cmd_params->ep2_type, endpoint2->type, 16);
+       cmd_params->ep2_type[15] = '\0';
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -1516,29 +1317,17 @@ int dprc_disconnect(struct fsl_mc_io *mc_io,
                    const struct dprc_endpoint *endpoint)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_disconnect *cmd_params;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(0, 32, endpoint->id);
-       cmd.params[0] |= mc_enc(32, 32, endpoint->if_id);
-       cmd.params[1] |= mc_enc(0, 8, endpoint->type[0]);
-       cmd.params[1] |= mc_enc(8, 8, endpoint->type[1]);
-       cmd.params[1] |= mc_enc(16, 8, endpoint->type[2]);
-       cmd.params[1] |= mc_enc(24, 8, endpoint->type[3]);
-       cmd.params[1] |= mc_enc(32, 8, endpoint->type[4]);
-       cmd.params[1] |= mc_enc(40, 8, endpoint->type[5]);
-       cmd.params[1] |= mc_enc(48, 8, endpoint->type[6]);
-       cmd.params[1] |= mc_enc(56, 8, endpoint->type[7]);
-       cmd.params[2] |= mc_enc(0, 8, endpoint->type[8]);
-       cmd.params[2] |= mc_enc(8, 8, endpoint->type[9]);
-       cmd.params[2] |= mc_enc(16, 8, endpoint->type[10]);
-       cmd.params[2] |= mc_enc(24, 8, endpoint->type[11]);
-       cmd.params[2] |= mc_enc(32, 8, endpoint->type[12]);
-       cmd.params[2] |= mc_enc(40, 8, endpoint->type[13]);
-       cmd.params[2] |= mc_enc(48, 8, endpoint->type[14]);
-       cmd.params[2] |= mc_enc(56, 8, endpoint->type[15]);
+       cmd_params = (struct dprc_cmd_disconnect *)cmd.params;
+       cmd_params->id = cpu_to_le32(endpoint->id);
+       cmd_params->interface_id = cpu_to_le32(endpoint->if_id);
+       strncpy(cmd_params->type, endpoint->type, 16);
+       cmd_params->type[15] = '\0';
 
        /* send command to mc*/
        return mc_send_command(mc_io, &cmd);
@@ -1567,30 +1356,19 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
                        int *state)
 {
        struct mc_command cmd = { 0 };
+       struct dprc_cmd_get_connection *cmd_params;
+       struct dprc_rsp_get_connection *rsp_params;
        int err;
 
        /* prepare command */
        cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION,
                                          cmd_flags,
                                          token);
-       cmd.params[0] |= mc_enc(0, 32, endpoint1->id);
-       cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id);
-       cmd.params[1] |= mc_enc(0, 8, endpoint1->type[0]);
-       cmd.params[1] |= mc_enc(8, 8, endpoint1->type[1]);
-       cmd.params[1] |= mc_enc(16, 8, endpoint1->type[2]);
-       cmd.params[1] |= mc_enc(24, 8, endpoint1->type[3]);
-       cmd.params[1] |= mc_enc(32, 8, endpoint1->type[4]);
-       cmd.params[1] |= mc_enc(40, 8, endpoint1->type[5]);
-       cmd.params[1] |= mc_enc(48, 8, endpoint1->type[6]);
-       cmd.params[1] |= mc_enc(56, 8, endpoint1->type[7]);
-       cmd.params[2] |= mc_enc(0, 8, endpoint1->type[8]);
-       cmd.params[2] |= mc_enc(8, 8, endpoint1->type[9]);
-       cmd.params[2] |= mc_enc(16, 8, endpoint1->type[10]);
-       cmd.params[2] |= mc_enc(24, 8, endpoint1->type[11]);
-       cmd.params[2] |= mc_enc(32, 8, endpoint1->type[12]);
-       cmd.params[2] |= mc_enc(40, 8, endpoint1->type[13]);
-       cmd.params[2] |= mc_enc(48, 8, endpoint1->type[14]);
-       cmd.params[2] |= mc_enc(56, 8, endpoint1->type[15]);
+       cmd_params = (struct dprc_cmd_get_connection *)cmd.params;
+       cmd_params->ep1_id = cpu_to_le32(endpoint1->id);
+       cmd_params->ep1_interface_id = cpu_to_le32(endpoint1->if_id);
+       strncpy(cmd_params->ep1_type, endpoint1->type, 16);
+       cmd_params->ep1_type[15] = '\0';
 
        /* send command to mc*/
        err = mc_send_command(mc_io, &cmd);
@@ -1598,25 +1376,12 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
                return err;
 
        /* retrieve response parameters */
-       endpoint2->id = mc_dec(cmd.params[3], 0, 32);
-       endpoint2->if_id = mc_dec(cmd.params[3], 32, 32);
-       endpoint2->type[0] = mc_dec(cmd.params[4], 0, 8);
-       endpoint2->type[1] = mc_dec(cmd.params[4], 8, 8);
-       endpoint2->type[2] = mc_dec(cmd.params[4], 16, 8);
-       endpoint2->type[3] = mc_dec(cmd.params[4], 24, 8);
-       endpoint2->type[4] = mc_dec(cmd.params[4], 32, 8);
-       endpoint2->type[5] = mc_dec(cmd.params[4], 40, 8);
-       endpoint2->type[6] = mc_dec(cmd.params[4], 48, 8);
-       endpoint2->type[7] = mc_dec(cmd.params[4], 56, 8);
-       endpoint2->type[8] = mc_dec(cmd.params[5], 0, 8);
-       endpoint2->type[9] = mc_dec(cmd.params[5], 8, 8);
-       endpoint2->type[10] = mc_dec(cmd.params[5], 16, 8);
-       endpoint2->type[11] = mc_dec(cmd.params[5], 24, 8);
-       endpoint2->type[12] = mc_dec(cmd.params[5], 32, 8);
-       endpoint2->type[13] = mc_dec(cmd.params[5], 40, 8);
-       endpoint2->type[14] = mc_dec(cmd.params[5], 48, 8);
-       endpoint2->type[15] = mc_dec(cmd.params[5], 56, 8);
-       *state = mc_dec(cmd.params[6], 0, 32);
+       rsp_params = (struct dprc_rsp_get_connection *)cmd.params;
+       endpoint2->id = le32_to_cpu(rsp_params->ep2_id);
+       endpoint2->if_id = le32_to_cpu(rsp_params->ep2_interface_id);
+       strncpy(endpoint2->type, rsp_params->ep2_type, 16);
+       endpoint2->type[15] = '\0';
+       *state = le32_to_cpu(rsp_params->state);
 
        return 0;
 }
index fb08f22a7f9c93ed87339a7b67dc00d00be24b1b..e59d85060c7b6cfb26550b5d09a8c94258492b86 100644 (file)
@@ -717,7 +717,7 @@ static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev)
        return 0;
 }
 
-static const struct fsl_mc_device_match_id match_id_table[] = {
+static const struct fsl_mc_device_id match_id_table[] = {
        {
         .vendor = FSL_MC_VENDOR_FREESCALE,
         .obj_type = "dpbp",
index 405364307561aaa6f3d5bc676b75c9835234413b..db3afdbdf4ae2fe6b191ef7b150af1fc2533c433 100644 (file)
@@ -24,8 +24,6 @@
 
 static struct kmem_cache *mc_dev_cache;
 
-static bool fsl_mc_is_root_dprc(struct device *dev);
-
 /**
  * fsl_mc_bus_match - device to driver matching callback
  * @dev: the MC object device structure to match against
@@ -36,7 +34,7 @@ static bool fsl_mc_is_root_dprc(struct device *dev);
  */
 static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
 {
-       const struct fsl_mc_device_match_id *id;
+       const struct fsl_mc_device_id *id;
        struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
        struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
        bool found = false;
@@ -78,14 +76,45 @@ out:
  */
 static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 {
-       pr_debug("%s invoked\n", __func__);
+       struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+       if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
+                          mc_dev->obj_desc.vendor,
+                          mc_dev->obj_desc.type))
+               return -ENOMEM;
+
        return 0;
 }
 
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+
+       return sprintf(buf, "fsl-mc:v%08Xd%s\n", mc_dev->obj_desc.vendor,
+                      mc_dev->obj_desc.type);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *fsl_mc_dev_attrs[] = {
+       &dev_attr_modalias.attr,
+       NULL,
+};
+
+static const struct attribute_group fsl_mc_dev_group = {
+       .attrs = fsl_mc_dev_attrs,
+};
+
+static const struct attribute_group *fsl_mc_dev_groups[] = {
+       &fsl_mc_dev_group,
+       NULL,
+};
+
 struct bus_type fsl_mc_bus_type = {
        .name = "fsl-mc",
        .match = fsl_mc_bus_match,
        .uevent = fsl_mc_bus_uevent,
+       .dev_groups = fsl_mc_dev_groups,
 };
 EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
 
@@ -216,19 +245,6 @@ static void fsl_mc_get_root_dprc(struct device *dev,
        }
 }
 
-/**
- * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
- */
-static bool fsl_mc_is_root_dprc(struct device *dev)
-{
-       struct device *root_dprc_dev;
-
-       fsl_mc_get_root_dprc(dev, &root_dprc_dev);
-       if (!root_dprc_dev)
-               return false;
-       return dev == root_dprc_dev;
-}
-
 static int get_dprc_attr(struct fsl_mc_io *mc_io,
                         int container_id, struct dprc_attributes *attr)
 {
@@ -392,6 +408,19 @@ error_cleanup_regions:
        return error;
 }
 
+/**
+ * fsl_mc_is_root_dprc - function to check if a given device is a root dprc
+ */
+bool fsl_mc_is_root_dprc(struct device *dev)
+{
+       struct device *root_dprc_dev;
+
+       fsl_mc_get_root_dprc(dev, &root_dprc_dev);
+       if (!root_dprc_dev)
+               return false;
+       return dev == root_dprc_dev;
+}
+
 /**
  * Add a newly discovered MC object device to be visible in Linux
  */
@@ -550,10 +579,6 @@ void fsl_mc_device_remove(struct fsl_mc_device *mc_dev)
 
        if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
                mc_bus = to_fsl_mc_bus(mc_dev);
-               if (mc_dev->mc_io) {
-                       fsl_destroy_mc_io(mc_dev->mc_io);
-                       mc_dev->mc_io = NULL;
-               }
 
                if (fsl_mc_is_root_dprc(&mc_dev->dev)) {
                        if (atomic_read(&root_dprc_count) > 0)
@@ -781,6 +806,10 @@ static int fsl_mc_bus_remove(struct platform_device *pdev)
                return -EINVAL;
 
        fsl_mc_device_remove(mc->root_mc_bus_dev);
+
+       fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io);
+       mc->root_mc_bus_dev->mc_io = NULL;
+
        dev_info(&pdev->dev, "Root MC bus device removed");
        return 0;
 }
index e202b2b88c638a4f614b72cf02545be809eddfea..c7be156ae5e06cb49c6879bd011b3a40e6c4a935 100644 (file)
 #include "../include/mc-sys.h"
 #include "dprc-cmd.h"
 
+/*
+ * Generate a unique ID identifying the interrupt (only used within the MSI
+ * irqdomain.  Combine the icid with the interrupt index.
+ */
+static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
+                                               struct msi_desc *desc)
+{
+       /*
+        * Make the base hwirq value for ICID*10000 so it is readable
+        * as a decimal value in /proc/interrupts.
+        */
+       return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+}
+
 static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
                                struct msi_desc *desc)
 {
        arg->desc = desc;
-       arg->hwirq = (irq_hw_number_t)desc->fsl_mc.msi_index;
+       arg->hwirq = fsl_mc_domain_calc_hwirq(to_fsl_mc_device(desc->dev),
+                                             desc);
 }
 
 static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
index 810a611c1cb042f3ae4ff80f957b368f8b124218..0c185abe665e5f4bf0e0280229dd21bb812c919e 100644 (file)
 #define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS    10
 #define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS    500
 
-#define MC_CMD_HDR_READ_CMDID(_hdr) \
-       ((u16)mc_dec((_hdr), MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S))
+static enum mc_cmd_status mc_cmd_hdr_read_status(struct mc_command *cmd)
+{
+       struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+
+       return (enum mc_cmd_status)hdr->status;
+}
+
+static u16 mc_cmd_hdr_read_cmdid(struct mc_command *cmd)
+{
+       struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+       u16 cmd_id = le16_to_cpu(hdr->cmd_id);
+
+       return (cmd_id & MC_CMD_HDR_CMDID_MASK) >> MC_CMD_HDR_CMDID_SHIFT;
+}
 
 /**
  * Creates an MC I/O object
@@ -261,10 +273,11 @@ static inline void mc_write_command(struct mc_command __iomem *portal,
 
        /* copy command parameters into the portal */
        for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
-               writeq(cmd->params[i], &portal->params[i]);
+               __raw_writeq(cmd->params[i], &portal->params[i]);
+       __iowmb();
 
        /* submit the command by writing the header */
-       writeq(cmd->header, &portal->header);
+       __raw_writeq(cmd->header, &portal->header);
 }
 
 /**
@@ -284,14 +297,17 @@ static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem *
        enum mc_cmd_status status;
 
        /* Copy command response header from MC portal: */
-       resp->header = readq(&portal->header);
-       status = MC_CMD_HDR_READ_STATUS(resp->header);
+       __iormb();
+       resp->header = __raw_readq(&portal->header);
+       __iormb();
+       status = mc_cmd_hdr_read_status(resp);
        if (status != MC_CMD_STATUS_OK)
                return status;
 
        /* Copy command response data from MC portal: */
        for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++)
-               resp->params[i] = readq(&portal->params[i]);
+               resp->params[i] = __raw_readq(&portal->params[i]);
+       __iormb();
 
        return status;
 }
@@ -331,10 +347,8 @@ static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io,
                        dev_dbg(mc_io->dev,
                                "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
                                 mc_io->portal_phys_addr,
-                                (unsigned int)
-                                       MC_CMD_HDR_READ_TOKEN(cmd->header),
-                                (unsigned int)
-                                       MC_CMD_HDR_READ_CMDID(cmd->header));
+                                (unsigned int)mc_cmd_hdr_read_token(cmd),
+                                (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
 
                        return -ETIMEDOUT;
                }
@@ -373,10 +387,8 @@ static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io,
                        dev_dbg(mc_io->dev,
                                "MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n",
                                 mc_io->portal_phys_addr,
-                                (unsigned int)
-                                       MC_CMD_HDR_READ_TOKEN(cmd->header),
-                                (unsigned int)
-                                       MC_CMD_HDR_READ_CMDID(cmd->header));
+                                (unsigned int)mc_cmd_hdr_read_token(cmd),
+                                (unsigned int)mc_cmd_hdr_read_cmdid(cmd));
 
                        return -ETIMEDOUT;
                }
@@ -429,8 +441,8 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd)
                dev_dbg(mc_io->dev,
                        "MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n",
                         mc_io->portal_phys_addr,
-                        (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header),
-                        (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header),
+                        (unsigned int)mc_cmd_hdr_read_token(cmd),
+                        (unsigned int)mc_cmd_hdr_read_cmdid(cmd),
                         mc_status_to_string(status),
                         (unsigned int)status);
 
index c57b454a291216e65bfff0e780c8ee3a308a87ae..4828ccd0cffd0716aaac39fc98e0025adaa7febb 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2013-2014 Freescale Semiconductor Inc.
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 
 #define DPBP_CMDID_SET_NOTIFICATIONS           0x01b0
 #define DPBP_CMDID_GET_NOTIFICATIONS           0x01b1
+
+struct dpbp_cmd_open {
+       __le32 dpbp_id;
+};
+
+#define DPBP_ENABLE                    0x1
+
+struct dpbp_rsp_is_enabled {
+       u8 enabled;
+};
+
+struct dpbp_cmd_set_irq {
+       /* cmd word 0 */
+       u8 irq_index;
+       u8 pad[3];
+       __le32 irq_val;
+       /* cmd word 1 */
+       __le64 irq_addr;
+       /* cmd word 2 */
+       __le32 irq_num;
+};
+
+struct dpbp_cmd_get_irq {
+       __le32 pad;
+       u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq {
+       /* response word 0 */
+       __le32 irq_val;
+       __le32 pad;
+       /* response word 1 */
+       __le64 irq_addr;
+       /* response word 2 */
+       __le32 irq_num;
+       __le32 type;
+};
+
+struct dpbp_cmd_set_irq_enable {
+       u8 enable;
+       u8 pad[3];
+       u8 irq_index;
+};
+
+struct dpbp_cmd_get_irq_enable {
+       __le32 pad;
+       u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_enable {
+       u8 enabled;
+};
+
+struct dpbp_cmd_set_irq_mask {
+       __le32 mask;
+       u8 irq_index;
+};
+
+struct dpbp_cmd_get_irq_mask {
+       __le32 pad;
+       u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_mask {
+       __le32 mask;
+};
+
+struct dpbp_cmd_get_irq_status {
+       __le32 status;
+       u8 irq_index;
+};
+
+struct dpbp_rsp_get_irq_status {
+       __le32 status;
+};
+
+struct dpbp_cmd_clear_irq_status {
+       __le32 status;
+       u8 irq_index;
+};
+
+struct dpbp_rsp_get_attributes {
+       /* response word 0 */
+       __le16 pad;
+       __le16 bpid;
+       __le32 id;
+       /* response word 1 */
+       __le16 version_major;
+       __le16 version_minor;
+};
+
+struct dpbp_cmd_set_notifications {
+       /* cmd word 0 */
+       __le32 depletion_entry;
+       __le32 depletion_exit;
+       /* cmd word 1 */
+       __le32 surplus_entry;
+       __le32 surplus_exit;
+       /* cmd word 2 */
+       __le16 options;
+       __le16 pad[3];
+       /* cmd word 3 */
+       __le64 message_ctx;
+       /* cmd word 4 */
+       __le64 message_iova;
+};
+
+struct dpbp_rsp_get_notifications {
+       /* response word 0 */
+       __le32 depletion_entry;
+       __le32 depletion_exit;
+       /* response word 1 */
+       __le32 surplus_entry;
+       __le32 surplus_exit;
+       /* response word 2 */
+       __le16 options;
+       __le16 pad[3];
+       /* response word 3 */
+       __le64 message_ctx;
+       /* response word 4 */
+       __le64 message_iova;
+};
+
 #endif /* _FSL_DPBP_CMD_H */
index 65277e3de44d537ca51df63f98768374f7884847..5decb9890c31d727f968818f1175934e2fde91dc 100644 (file)
 
 #define MC_CMD_NUM_OF_PARAMS   7
 
-#define MAKE_UMASK64(_width) \
-       ((u64)((_width) < 64 ? ((u64)1 << (_width)) - 1 : -1))
-
-static inline u64 mc_enc(int lsoffset, int width, u64 val)
-{
-       return (u64)(((u64)val & MAKE_UMASK64(width)) << lsoffset);
-}
-
-static inline u64 mc_dec(u64 val, int lsoffset, int width)
-{
-       return (u64)((val >> lsoffset) & MAKE_UMASK64(width));
-}
+struct mc_cmd_header {
+       u8 src_id;
+       u8 flags_hw;
+       u8 status;
+       u8 flags_sw;
+       __le16 token;
+       __le16 cmd_id;
+};
 
 struct mc_command {
        u64 header;
@@ -72,60 +68,41 @@ enum mc_cmd_status {
  */
 
 /* High priority flag */
-#define MC_CMD_FLAG_PRI                0x00008000
+#define MC_CMD_FLAG_PRI                0x80
 /* Command completion flag */
-#define MC_CMD_FLAG_INTR_DIS   0x01000000
-
-/*
- * TODO Remove following two defines after completion of flib 8.0.0
- * integration
- */
-#define MC_CMD_PRI_LOW         0 /*!< Low Priority command indication */
-#define MC_CMD_PRI_HIGH                1 /*!< High Priority command indication */
-
-#define MC_CMD_HDR_CMDID_O     52      /* Command ID field offset */
-#define MC_CMD_HDR_CMDID_S     12      /* Command ID field size */
-#define MC_CMD_HDR_TOKEN_O     38      /* Token field offset */
-#define MC_CMD_HDR_TOKEN_S     10      /* Token field size */
-#define MC_CMD_HDR_STATUS_O    16      /* Status field offset */
-#define MC_CMD_HDR_STATUS_S    8       /* Status field size*/
-#define MC_CMD_HDR_FLAGS_O     0       /* Flags field offset */
-#define MC_CMD_HDR_FLAGS_S     32      /* Flags field size*/
-#define MC_CMD_HDR_FLAGS_MASK  0xFF00FF00 /* Command flags mask */
-
-#define MC_CMD_HDR_READ_STATUS(_hdr) \
-       ((enum mc_cmd_status)mc_dec((_hdr), \
-               MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S))
-
-#define MC_CMD_HDR_READ_TOKEN(_hdr) \
-       ((u16)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S))
-
-#define MC_CMD_HDR_READ_FLAGS(_hdr) \
-       ((u32)mc_dec((_hdr), MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S))
+#define MC_CMD_FLAG_INTR_DIS   0x01
 
-#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \
-       ((_ext)[_param] |= mc_enc((_offset), (_width), _arg))
-
-#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \
-       ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg))
-
-#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \
-       (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width)))
+#define MC_CMD_HDR_CMDID_MASK          0xFFF0
+#define MC_CMD_HDR_CMDID_SHIFT         4
+#define MC_CMD_HDR_TOKEN_MASK          0xFFC0
+#define MC_CMD_HDR_TOKEN_SHIFT         6
 
 static inline u64 mc_encode_cmd_header(u16 cmd_id,
                                       u32 cmd_flags,
                                       u16 token)
 {
-       u64 hdr;
+       u64 header = 0;
+       struct mc_cmd_header *hdr = (struct mc_cmd_header *)&header;
+
+       hdr->cmd_id = cpu_to_le16((cmd_id << MC_CMD_HDR_CMDID_SHIFT) &
+                                 MC_CMD_HDR_CMDID_MASK);
+       hdr->token = cpu_to_le16((token << MC_CMD_HDR_TOKEN_SHIFT) &
+                                MC_CMD_HDR_TOKEN_MASK);
+       hdr->status = MC_CMD_STATUS_READY;
+       if (cmd_flags & MC_CMD_FLAG_PRI)
+               hdr->flags_hw = MC_CMD_FLAG_PRI;
+       if (cmd_flags & MC_CMD_FLAG_INTR_DIS)
+               hdr->flags_sw = MC_CMD_FLAG_INTR_DIS;
+
+       return header;
+}
 
-       hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id);
-       hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S,
-                      (cmd_flags & MC_CMD_HDR_FLAGS_MASK));
-       hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token);
-       hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S,
-                      MC_CMD_STATUS_READY);
+static inline u16 mc_cmd_hdr_read_token(struct mc_command *cmd)
+{
+       struct mc_cmd_header *hdr = (struct mc_cmd_header *)&cmd->header;
+       u16 token = le16_to_cpu(hdr->token);
 
-       return hdr;
+       return (token & MC_CMD_HDR_TOKEN_MASK) >> MC_CMD_HDR_TOKEN_SHIFT;
 }
 
 #endif /* __FSL_MC_CMD_H */
index ac7c1ce68c03a5ce3dc42b389e96786b84cbc039..853cbf38a4004cb16d2fc9db58ef5ed9381f5ac7 100644 (file)
@@ -39,7 +39,7 @@ struct fsl_mc_bus;
  */
 struct fsl_mc_driver {
        struct device_driver driver;
-       const struct fsl_mc_device_match_id *match_id_table;
+       const struct fsl_mc_device_id *match_id_table;
        int (*probe)(struct fsl_mc_device *dev);
        int (*remove)(struct fsl_mc_device *dev);
        void (*shutdown)(struct fsl_mc_device *dev);
@@ -50,23 +50,6 @@ struct fsl_mc_driver {
 #define to_fsl_mc_driver(_drv) \
        container_of(_drv, struct fsl_mc_driver, driver)
 
-/**
- * struct fsl_mc_device_match_id - MC object device Id entry for driver matching
- * @vendor: vendor ID
- * @obj_type: MC object type
- * @ver_major: MC object version major number
- * @ver_minor: MC object version minor number
- *
- * Type of entries in the "device Id" table for MC object devices supported by
- * a MC object device driver. The last entry of the table has vendor set to 0x0
- */
-struct fsl_mc_device_match_id {
-       u16 vendor;
-       const char obj_type[16];
-       u32 ver_major;
-       u32 ver_minor;
-};
-
 /**
  * enum fsl_mc_pool_type - Types of allocatable MC bus resources
  *
@@ -224,6 +207,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
 
 void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
 
+bool fsl_mc_is_root_dprc(struct device *dev);
+
 extern struct bus_type fsl_mc_bus_type;
 
 #endif /* _FSL_MC_H_ */
index f066aa30f0ac4c75428a8c2c98208239b3334eb1..1c994b57c7d2082edca68c1634fe3fe5afa5cdca 100644 (file)
@@ -51,20 +51,6 @@ config ADIS16240
          To compile this driver as a module, say M here: the module will be
          called adis16240.
 
-config LIS3L02DQ
-       tristate "ST Microelectronics LIS3L02DQ Accelerometer Driver"
-       depends on SPI
-       select IIO_TRIGGER if IIO_BUFFER
-       depends on !IIO_BUFFER || IIO_KFIFO_BUF
-       depends on GPIOLIB || COMPILE_TEST
-       help
-         Say Y here to build SPI support for the ST microelectronics
-         accelerometer. The driver supplies direct access via sysfs files
-         and an event interface via a character device.
-
-         To compile this driver as a module, say M here: the module will be
-         called lis3l02dq.
-
 config SCA3000
        depends on IIO_BUFFER
        depends on SPI
index 415329c96f0ce21ac7559b7b44524427ebf1f7e4..1810a434a755048d34fcd87f7073feef9b024272 100644 (file)
@@ -14,9 +14,5 @@ obj-$(CONFIG_ADIS16209) += adis16209.o
 adis16240-y             := adis16240_core.o
 obj-$(CONFIG_ADIS16240) += adis16240.o
 
-lis3l02dq-y            := lis3l02dq_core.o
-lis3l02dq-$(CONFIG_IIO_BUFFER) += lis3l02dq_ring.o
-obj-$(CONFIG_LIS3L02DQ)        += lis3l02dq.o
-
 sca3000-y              := sca3000_core.o sca3000_ring.o
 obj-$(CONFIG_SCA3000)  += sca3000.o
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
deleted file mode 100644 (file)
index 6bd3d4d..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * LISL02DQ.h -- support STMicroelectronics LISD02DQ
- *               3d 2g Linear Accelerometers via SPI
- *
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * Loosely based upon tle62x0.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef SPI_LIS3L02DQ_H_
-#define SPI_LIS3L02DQ_H_
-#define LIS3L02DQ_READ_REG(a) ((a) | 0x80)
-#define LIS3L02DQ_WRITE_REG(a) a
-
-/* Calibration parameters */
-#define LIS3L02DQ_REG_OFFSET_X_ADDR            0x16
-#define LIS3L02DQ_REG_OFFSET_Y_ADDR            0x17
-#define LIS3L02DQ_REG_OFFSET_Z_ADDR            0x18
-
-#define LIS3L02DQ_REG_GAIN_X_ADDR              0x19
-#define LIS3L02DQ_REG_GAIN_Y_ADDR              0x1A
-#define LIS3L02DQ_REG_GAIN_Z_ADDR              0x1B
-
-/* Control Register (1 of 2) */
-#define LIS3L02DQ_REG_CTRL_1_ADDR              0x20
-/* Power ctrl - either bit set corresponds to on*/
-#define LIS3L02DQ_REG_CTRL_1_PD_ON             0xC0
-
-/* Decimation Factor  */
-#define LIS3L02DQ_DEC_MASK                     0x30
-#define LIS3L02DQ_REG_CTRL_1_DF_128            0x00
-#define LIS3L02DQ_REG_CTRL_1_DF_64             0x10
-#define LIS3L02DQ_REG_CTRL_1_DF_32             0x20
-#define LIS3L02DQ_REG_CTRL_1_DF_8              (0x10 | 0x20)
-
-/* Self Test Enable */
-#define LIS3L02DQ_REG_CTRL_1_SELF_TEST_ON      0x08
-
-/* Axes enable ctrls */
-#define LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE     0x04
-#define LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE     0x02
-#define LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE     0x01
-
-/* Control Register (2 of 2) */
-#define LIS3L02DQ_REG_CTRL_2_ADDR              0x21
-
-/* Block Data Update only after MSB and LSB read */
-#define LIS3L02DQ_REG_CTRL_2_BLOCK_UPDATE      0x40
-
-/* Set to big endian output */
-#define LIS3L02DQ_REG_CTRL_2_BIG_ENDIAN                0x20
-
-/* Reboot memory content */
-#define LIS3L02DQ_REG_CTRL_2_REBOOT_MEMORY     0x10
-
-/* Interrupt Enable - applies data ready to the RDY pad */
-#define LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT  0x08
-
-/* Enable Data Ready Generation - relationship with previous unclear in docs */
-#define LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION 0x04
-
-/* SPI 3 wire mode */
-#define LIS3L02DQ_REG_CTRL_2_THREE_WIRE_SPI_MODE       0x02
-
-/* Data alignment, default is 12 bit right justified
- * - option for 16 bit left justified
- */
-#define LIS3L02DQ_REG_CTRL_2_DATA_ALIGNMENT_16_BIT_LEFT_JUSTIFIED      0x01
-
-/* Interrupt related stuff */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_ADDR                 0x23
-
-/* Switch from or combination of conditions to and */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_BOOLEAN_AND          0x80
-
-/* Latch interrupt request,
- * if on ack must be given by reading the ack register
- */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC            0x40
-
-/* Z Interrupt on High (above threshold) */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_HIGH     0x20
-/* Z Interrupt on Low */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Z_LOW      0x10
-/* Y Interrupt on High */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_HIGH     0x08
-/* Y Interrupt on Low */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_Y_LOW      0x04
-/* X Interrupt on High */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_HIGH     0x02
-/* X Interrupt on Low */
-#define LIS3L02DQ_REG_WAKE_UP_CFG_INTERRUPT_X_LOW 0x01
-
-/* Register that gives description of what caused interrupt
- * - latched if set in CFG_ADDRES
- */
-#define LIS3L02DQ_REG_WAKE_UP_SRC_ADDR                 0x24
-/* top bit ignored */
-/* Interrupt Active */
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_ACTIVATED  0x40
-/* Interupts that have been triggered */
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH     0x20
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW      0x10
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH     0x08
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW      0x04
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH     0x02
-#define LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW      0x01
-
-#define LIS3L02DQ_REG_WAKE_UP_ACK_ADDR                 0x25
-
-/* Status register */
-#define LIS3L02DQ_REG_STATUS_ADDR                      0x27
-/* XYZ axis data overrun - first is all overrun? */
-#define LIS3L02DQ_REG_STATUS_XYZ_OVERRUN               0x80
-#define LIS3L02DQ_REG_STATUS_Z_OVERRUN                 0x40
-#define LIS3L02DQ_REG_STATUS_Y_OVERRUN                 0x20
-#define LIS3L02DQ_REG_STATUS_X_OVERRUN                 0x10
-/* XYZ new data available - first is all 3 available? */
-#define LIS3L02DQ_REG_STATUS_XYZ_NEW_DATA              0x08
-#define LIS3L02DQ_REG_STATUS_Z_NEW_DATA                        0x04
-#define LIS3L02DQ_REG_STATUS_Y_NEW_DATA                        0x02
-#define LIS3L02DQ_REG_STATUS_X_NEW_DATA                        0x01
-
-/* The accelerometer readings - low and high bytes.
- * Form of high byte dependent on justification set in ctrl reg
- */
-#define LIS3L02DQ_REG_OUT_X_L_ADDR                     0x28
-#define LIS3L02DQ_REG_OUT_X_H_ADDR                     0x29
-#define LIS3L02DQ_REG_OUT_Y_L_ADDR                     0x2A
-#define LIS3L02DQ_REG_OUT_Y_H_ADDR                     0x2B
-#define LIS3L02DQ_REG_OUT_Z_L_ADDR                     0x2C
-#define LIS3L02DQ_REG_OUT_Z_H_ADDR                     0x2D
-
-/* Threshold values for all axes and both above and below thresholds
- * - i.e. there is only one value
- */
-#define LIS3L02DQ_REG_THS_L_ADDR                       0x2E
-#define LIS3L02DQ_REG_THS_H_ADDR                       0x2F
-
-#define LIS3L02DQ_DEFAULT_CTRL1 (LIS3L02DQ_REG_CTRL_1_PD_ON          \
-                                | LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE \
-                                | LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE \
-                                | LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE \
-                                | LIS3L02DQ_REG_CTRL_1_DF_128)
-
-#define LIS3L02DQ_DEFAULT_CTRL2        0
-
-#define LIS3L02DQ_MAX_TX 12
-#define LIS3L02DQ_MAX_RX 12
-/**
- * struct lis3l02dq_state - device instance specific data
- * @us:                        actual spi_device
- * @trig:              data ready trigger registered with iio
- * @buf_lock:          mutex to protect tx and rx
- * @tx:                        transmit buffer
- * @rx:                        receive buffer
- **/
-struct lis3l02dq_state {
-       struct spi_device               *us;
-       struct iio_trigger              *trig;
-       struct mutex                    buf_lock;
-       int                             gpio;
-       bool                            trigger_on;
-
-       u8      tx[LIS3L02DQ_MAX_RX] ____cacheline_aligned;
-       u8      rx[LIS3L02DQ_MAX_RX] ____cacheline_aligned;
-};
-
-int lis3l02dq_spi_read_reg_8(struct iio_dev *indio_dev,
-                            u8 reg_address,
-                            u8 *val);
-
-int lis3l02dq_spi_write_reg_8(struct iio_dev *indio_dev,
-                             u8 reg_address,
-                             u8 val);
-
-int lis3l02dq_disable_all_events(struct iio_dev *indio_dev);
-
-#ifdef CONFIG_IIO_BUFFER
-/* At the moment triggers are only used for buffer
- * filling. This may change!
- */
-void lis3l02dq_remove_trigger(struct iio_dev *indio_dev);
-int lis3l02dq_probe_trigger(struct iio_dev *indio_dev);
-
-int lis3l02dq_configure_buffer(struct iio_dev *indio_dev);
-void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
-
-irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private);
-#define lis3l02dq_th lis3l02dq_data_rdy_trig_poll
-
-#else /* CONFIG_IIO_BUFFER */
-#define lis3l02dq_th lis3l02dq_nobuffer
-
-static inline void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
-{
-}
-
-static inline int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
-{
-       return 0;
-}
-
-static int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
-{
-       return 0;
-}
-
-static inline void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
-{
-}
-#endif /* CONFIG_IIO_BUFFER */
-#endif /* SPI_LIS3L02DQ_H_ */
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
deleted file mode 100644 (file)
index 7a6fed3..0000000
+++ /dev/null
@@ -1,814 +0,0 @@
-/*
- * lis3l02dq.c support STMicroelectronics LISD02DQ
- *             3d 2g Linear Accelerometers via SPI
- *
- * Copyright (c) 2007 Jonathan Cameron <jic23@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Settings:
- * 16 bit left justified mode used.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include <linux/iio/buffer.h>
-
-#include "lis3l02dq.h"
-
-/* At the moment the spi framework doesn't allow global setting of cs_change.
- * It's in the likely to be added comment at the top of spi.h.
- * This means that use cannot be made of spi_write etc.
- */
-/* direct copy of the irq_default_primary_handler */
-#ifndef CONFIG_IIO_BUFFER
-static irqreturn_t lis3l02dq_nobuffer(int irq, void *private)
-{
-       return IRQ_WAKE_THREAD;
-}
-#endif
-
-/**
- * lis3l02dq_spi_read_reg_8() - read single byte from a single register
- * @indio_dev: iio_dev for this actual device
- * @reg_address: the address of the register to be read
- * @val: pass back the resulting value
- **/
-int lis3l02dq_spi_read_reg_8(struct iio_dev *indio_dev,
-                            u8 reg_address, u8 *val)
-{
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-       int ret;
-       struct spi_transfer xfer = {
-               .tx_buf = st->tx,
-               .rx_buf = st->rx,
-               .bits_per_word = 8,
-               .len = 2,
-       };
-
-       mutex_lock(&st->buf_lock);
-       st->tx[0] = LIS3L02DQ_READ_REG(reg_address);
-       st->tx[1] = 0;
-
-       ret = spi_sync_transfer(st->us, &xfer, 1);
-       *val = st->rx[1];
-       mutex_unlock(&st->buf_lock);
-
-       return ret;
-}
-
-/**
- * lis3l02dq_spi_write_reg_8() - write single byte to a register
- * @indio_dev: iio_dev for this device
- * @reg_address: the address of the register to be written
- * @val: the value to write
- **/
-int lis3l02dq_spi_write_reg_8(struct iio_dev *indio_dev,
-                             u8 reg_address,
-                             u8 val)
-{
-       int ret;
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-
-       mutex_lock(&st->buf_lock);
-       st->tx[0] = LIS3L02DQ_WRITE_REG(reg_address);
-       st->tx[1] = val;
-       ret = spi_write(st->us, st->tx, 2);
-       mutex_unlock(&st->buf_lock);
-
-       return ret;
-}
-
-/**
- * lisl302dq_spi_write_reg_s16() - write 2 bytes to a pair of registers
- * @indio_dev: iio_dev for this device
- * @lower_reg_address: the address of the lower of the two registers.
- *               Second register is assumed to have address one greater.
- * @value: value to be written
- **/
-static int lis3l02dq_spi_write_reg_s16(struct iio_dev *indio_dev,
-                                      u8 lower_reg_address,
-                                      s16 value)
-{
-       int ret;
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-       struct spi_transfer xfers[] = { {
-                       .tx_buf = st->tx,
-                       .bits_per_word = 8,
-                       .len = 2,
-                       .cs_change = 1,
-               }, {
-                       .tx_buf = st->tx + 2,
-                       .bits_per_word = 8,
-                       .len = 2,
-               },
-       };
-
-       mutex_lock(&st->buf_lock);
-       st->tx[0] = LIS3L02DQ_WRITE_REG(lower_reg_address);
-       st->tx[1] = value & 0xFF;
-       st->tx[2] = LIS3L02DQ_WRITE_REG(lower_reg_address + 1);
-       st->tx[3] = (value >> 8) & 0xFF;
-
-       ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
-       mutex_unlock(&st->buf_lock);
-
-       return ret;
-}
-
-static int lis3l02dq_read_reg_s16(struct iio_dev *indio_dev,
-                                 u8 lower_reg_address,
-                                 int *val)
-{
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-       int ret;
-       s16 tempval;
-       struct spi_transfer xfers[] = { {
-                       .tx_buf = st->tx,
-                       .rx_buf = st->rx,
-                       .bits_per_word = 8,
-                       .len = 2,
-                       .cs_change = 1,
-               }, {
-                       .tx_buf = st->tx + 2,
-                       .rx_buf = st->rx + 2,
-                       .bits_per_word = 8,
-                       .len = 2,
-               },
-       };
-
-       mutex_lock(&st->buf_lock);
-       st->tx[0] = LIS3L02DQ_READ_REG(lower_reg_address);
-       st->tx[1] = 0;
-       st->tx[2] = LIS3L02DQ_READ_REG(lower_reg_address + 1);
-       st->tx[3] = 0;
-
-       ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
-       if (ret) {
-               dev_err(&st->us->dev, "problem when reading 16 bit register");
-               goto error_ret;
-       }
-       tempval = (s16)(st->rx[1]) | ((s16)(st->rx[3]) << 8);
-
-       *val = tempval;
-error_ret:
-       mutex_unlock(&st->buf_lock);
-       return ret;
-}
-
-enum lis3l02dq_rm_ind {
-       LIS3L02DQ_ACCEL,
-       LIS3L02DQ_GAIN,
-       LIS3L02DQ_BIAS,
-};
-
-static u8 lis3l02dq_axis_map[3][3] = {
-       [LIS3L02DQ_ACCEL] = { LIS3L02DQ_REG_OUT_X_L_ADDR,
-                             LIS3L02DQ_REG_OUT_Y_L_ADDR,
-                             LIS3L02DQ_REG_OUT_Z_L_ADDR },
-       [LIS3L02DQ_GAIN] = { LIS3L02DQ_REG_GAIN_X_ADDR,
-                            LIS3L02DQ_REG_GAIN_Y_ADDR,
-                            LIS3L02DQ_REG_GAIN_Z_ADDR },
-       [LIS3L02DQ_BIAS] = { LIS3L02DQ_REG_OFFSET_X_ADDR,
-                            LIS3L02DQ_REG_OFFSET_Y_ADDR,
-                            LIS3L02DQ_REG_OFFSET_Z_ADDR }
-};
-
-static int lis3l02dq_read_thresh(struct iio_dev *indio_dev,
-                                const struct iio_chan_spec *chan,
-                                enum iio_event_type type,
-                                enum iio_event_direction dir,
-                                enum iio_event_info info,
-                                int *val, int *val2)
-{
-       int ret;
-
-       ret = lis3l02dq_read_reg_s16(indio_dev, LIS3L02DQ_REG_THS_L_ADDR, val);
-       if (ret)
-               return ret;
-       return IIO_VAL_INT;
-}
-
-static int lis3l02dq_write_thresh(struct iio_dev *indio_dev,
-                                 const struct iio_chan_spec *chan,
-                                 enum iio_event_type type,
-                                 enum iio_event_direction dir,
-                                 enum iio_event_info info,
-                                 int val, int val2)
-{
-       u16 value = val;
-
-       return lis3l02dq_spi_write_reg_s16(indio_dev,
-                                          LIS3L02DQ_REG_THS_L_ADDR,
-                                          value);
-}
-
-static int lis3l02dq_write_raw(struct iio_dev *indio_dev,
-                              struct iio_chan_spec const *chan,
-                              int val,
-                              int val2,
-                              long mask)
-{
-       int ret = -EINVAL, reg;
-       u8 uval;
-       s8 sval;
-
-       switch (mask) {
-       case IIO_CHAN_INFO_CALIBBIAS:
-               if (val > 255 || val < -256)
-                       return -EINVAL;
-               sval = val;
-               reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
-               ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, sval);
-               break;
-       case IIO_CHAN_INFO_CALIBSCALE:
-               if (val & ~0xFF)
-                       return -EINVAL;
-               uval = val;
-               reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
-               ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, uval);
-               break;
-       }
-       return ret;
-}
-
-static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
-                             struct iio_chan_spec const *chan,
-                             int *val,
-                             int *val2,
-                             long mask)
-{
-       u8 utemp;
-       s8 stemp;
-       ssize_t ret = 0;
-       u8 reg;
-
-       switch (mask) {
-       case IIO_CHAN_INFO_RAW:
-               /* Take the iio_dev status lock */
-               mutex_lock(&indio_dev->mlock);
-               if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
-                       ret = -EBUSY;
-               } else {
-                       reg = lis3l02dq_axis_map
-                               [LIS3L02DQ_ACCEL][chan->address];
-                       ret = lis3l02dq_read_reg_s16(indio_dev, reg, val);
-               }
-               mutex_unlock(&indio_dev->mlock);
-               if (ret < 0)
-                       goto error_ret;
-               return IIO_VAL_INT;
-       case IIO_CHAN_INFO_SCALE:
-               *val = 0;
-               *val2 = 9580;
-               return IIO_VAL_INT_PLUS_MICRO;
-       case IIO_CHAN_INFO_CALIBSCALE:
-               reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
-               ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, &utemp);
-               if (ret)
-                       goto error_ret;
-               /* to match with what previous code does */
-               *val = utemp;
-               return IIO_VAL_INT;
-
-       case IIO_CHAN_INFO_CALIBBIAS:
-               reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
-               ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, (u8 *)&stemp);
-               /* to match with what previous code does */
-               *val = stemp;
-               return IIO_VAL_INT;
-       }
-error_ret:
-       return ret;
-}
-
-static ssize_t lis3l02dq_read_frequency(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
-{
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-       int ret, len = 0;
-       s8 t;
-
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_CTRL_1_ADDR,
-                                      (u8 *)&t);
-       if (ret)
-               return ret;
-       t &= LIS3L02DQ_DEC_MASK;
-       switch (t) {
-       case LIS3L02DQ_REG_CTRL_1_DF_128:
-               len = sprintf(buf, "280\n");
-               break;
-       case LIS3L02DQ_REG_CTRL_1_DF_64:
-               len = sprintf(buf, "560\n");
-               break;
-       case LIS3L02DQ_REG_CTRL_1_DF_32:
-               len = sprintf(buf, "1120\n");
-               break;
-       case LIS3L02DQ_REG_CTRL_1_DF_8:
-               len = sprintf(buf, "4480\n");
-               break;
-       }
-       return len;
-}
-
-static ssize_t lis3l02dq_write_frequency(struct device *dev,
-                                        struct device_attribute *attr,
-                                        const char *buf,
-                                        size_t len)
-{
-       struct iio_dev *indio_dev = dev_to_iio_dev(dev);
-       unsigned long val;
-       int ret;
-       u8 t;
-
-       ret = kstrtoul(buf, 10, &val);
-       if (ret)
-               return ret;
-
-       mutex_lock(&indio_dev->mlock);
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_CTRL_1_ADDR,
-                                      &t);
-       if (ret)
-               goto error_ret_mutex;
-       /* Wipe the bits clean */
-       t &= ~LIS3L02DQ_DEC_MASK;
-       switch (val) {
-       case 280:
-               t |= LIS3L02DQ_REG_CTRL_1_DF_128;
-               break;
-       case 560:
-               t |= LIS3L02DQ_REG_CTRL_1_DF_64;
-               break;
-       case 1120:
-               t |= LIS3L02DQ_REG_CTRL_1_DF_32;
-               break;
-       case 4480:
-               t |= LIS3L02DQ_REG_CTRL_1_DF_8;
-               break;
-       default:
-               ret = -EINVAL;
-               goto error_ret_mutex;
-       }
-
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_CTRL_1_ADDR,
-                                       t);
-
-error_ret_mutex:
-       mutex_unlock(&indio_dev->mlock);
-
-       return ret ? ret : len;
-}
-
-static int lis3l02dq_initial_setup(struct iio_dev *indio_dev)
-{
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-       int ret;
-       u8 val, valtest;
-
-       st->us->mode = SPI_MODE_3;
-
-       spi_setup(st->us);
-
-       val = LIS3L02DQ_DEFAULT_CTRL1;
-       /* Write suitable defaults to ctrl1 */
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_CTRL_1_ADDR,
-                                       val);
-       if (ret) {
-               dev_err(&st->us->dev, "problem with setup control register 1");
-               goto err_ret;
-       }
-       /* Repeat as sometimes doesn't work first time? */
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_CTRL_1_ADDR,
-                                       val);
-       if (ret) {
-               dev_err(&st->us->dev, "problem with setup control register 1");
-               goto err_ret;
-       }
-
-       /*
-        * Read back to check this has worked acts as loose test of correct
-        * chip
-        */
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_CTRL_1_ADDR,
-                                      &valtest);
-       if (ret || (valtest != val)) {
-               dev_err(&indio_dev->dev,
-                       "device not playing ball %d %d\n", valtest, val);
-               ret = -EINVAL;
-               goto err_ret;
-       }
-
-       val = LIS3L02DQ_DEFAULT_CTRL2;
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_CTRL_2_ADDR,
-                                       val);
-       if (ret) {
-               dev_err(&st->us->dev, "problem with setup control register 2");
-               goto err_ret;
-       }
-
-       val = LIS3L02DQ_REG_WAKE_UP_CFG_LATCH_SRC;
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
-                                       val);
-       if (ret)
-               dev_err(&st->us->dev, "problem with interrupt cfg register");
-err_ret:
-
-       return ret;
-}
-
-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
-                             lis3l02dq_read_frequency,
-                             lis3l02dq_write_frequency);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("280 560 1120 4480");
-
-static irqreturn_t lis3l02dq_event_handler(int irq, void *private)
-{
-       struct iio_dev *indio_dev = private;
-       u8 t;
-
-       s64 timestamp = iio_get_time_ns();
-
-       lis3l02dq_spi_read_reg_8(indio_dev,
-                                LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
-                                &t);
-
-       if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_HIGH)
-               iio_push_event(indio_dev,
-                              IIO_MOD_EVENT_CODE(IIO_ACCEL,
-                                                 0,
-                                                 IIO_MOD_Z,
-                                                 IIO_EV_TYPE_THRESH,
-                                                 IIO_EV_DIR_RISING),
-                              timestamp);
-
-       if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Z_LOW)
-               iio_push_event(indio_dev,
-                              IIO_MOD_EVENT_CODE(IIO_ACCEL,
-                                                 0,
-                                                 IIO_MOD_Z,
-                                                 IIO_EV_TYPE_THRESH,
-                                                 IIO_EV_DIR_FALLING),
-                              timestamp);
-
-       if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_HIGH)
-               iio_push_event(indio_dev,
-                              IIO_MOD_EVENT_CODE(IIO_ACCEL,
-                                                 0,
-                                                 IIO_MOD_Y,
-                                                 IIO_EV_TYPE_THRESH,
-                                                 IIO_EV_DIR_RISING),
-                              timestamp);
-
-       if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_Y_LOW)
-               iio_push_event(indio_dev,
-                              IIO_MOD_EVENT_CODE(IIO_ACCEL,
-                                                 0,
-                                                 IIO_MOD_Y,
-                                                 IIO_EV_TYPE_THRESH,
-                                                 IIO_EV_DIR_FALLING),
-                              timestamp);
-
-       if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_HIGH)
-               iio_push_event(indio_dev,
-                              IIO_MOD_EVENT_CODE(IIO_ACCEL,
-                                                 0,
-                                                 IIO_MOD_X,
-                                                 IIO_EV_TYPE_THRESH,
-                                                 IIO_EV_DIR_RISING),
-                              timestamp);
-
-       if (t & LIS3L02DQ_REG_WAKE_UP_SRC_INTERRUPT_X_LOW)
-               iio_push_event(indio_dev,
-                              IIO_MOD_EVENT_CODE(IIO_ACCEL,
-                                                 0,
-                                                 IIO_MOD_X,
-                                                 IIO_EV_TYPE_THRESH,
-                                                 IIO_EV_DIR_FALLING),
-                              timestamp);
-
-       /* Ack and allow for new interrupts */
-       lis3l02dq_spi_read_reg_8(indio_dev,
-                                LIS3L02DQ_REG_WAKE_UP_ACK_ADDR,
-                                &t);
-
-       return IRQ_HANDLED;
-}
-
-static const struct iio_event_spec lis3l02dq_event[] = {
-       {
-               .type = IIO_EV_TYPE_THRESH,
-               .dir = IIO_EV_DIR_RISING,
-               .mask_separate = BIT(IIO_EV_INFO_ENABLE),
-               .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
-       }, {
-               .type = IIO_EV_TYPE_THRESH,
-               .dir = IIO_EV_DIR_FALLING,
-               .mask_separate = BIT(IIO_EV_INFO_ENABLE),
-               .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
-       }
-};
-
-#define LIS3L02DQ_CHAN(index, mod)                             \
-       {                                                       \
-               .type = IIO_ACCEL,                              \
-               .modified = 1,                                  \
-               .channel2 = mod,                                \
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |  \
-                       BIT(IIO_CHAN_INFO_CALIBSCALE) |         \
-                       BIT(IIO_CHAN_INFO_CALIBBIAS),           \
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
-               .address = index,                               \
-               .scan_index = index,                            \
-               .scan_type = {                                  \
-                       .sign = 's',                            \
-                       .realbits = 12,                         \
-                       .storagebits = 16,                      \
-               },                                              \
-               .event_spec = lis3l02dq_event,                  \
-               .num_event_specs = ARRAY_SIZE(lis3l02dq_event), \
-        }
-
-static const struct iio_chan_spec lis3l02dq_channels[] = {
-       LIS3L02DQ_CHAN(0, IIO_MOD_X),
-       LIS3L02DQ_CHAN(1, IIO_MOD_Y),
-       LIS3L02DQ_CHAN(2, IIO_MOD_Z),
-       IIO_CHAN_SOFT_TIMESTAMP(3)
-};
-
-static int lis3l02dq_read_event_config(struct iio_dev *indio_dev,
-                                      const struct iio_chan_spec *chan,
-                                      enum iio_event_type type,
-                                      enum iio_event_direction dir)
-{
-       u8 val;
-       int ret;
-       u8 mask = 1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING));
-
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
-                                      &val);
-       if (ret < 0)
-               return ret;
-
-       return !!(val & mask);
-}
-
-int lis3l02dq_disable_all_events(struct iio_dev *indio_dev)
-{
-       int ret;
-       u8 control, val;
-
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_CTRL_2_ADDR,
-                                      &control);
-
-       control &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT;
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_CTRL_2_ADDR,
-                                       control);
-       if (ret)
-               goto error_ret;
-       /* Also for consistency clear the mask */
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
-                                      &val);
-       if (ret)
-               goto error_ret;
-       val &= ~0x3f;
-
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
-                                       val);
-       if (ret)
-               goto error_ret;
-
-       ret = control;
-error_ret:
-       return ret;
-}
-
-static int lis3l02dq_write_event_config(struct iio_dev *indio_dev,
-                                       const struct iio_chan_spec *chan,
-                                       enum iio_event_type type,
-                                       enum iio_event_direction dir,
-                                       int state)
-{
-       int ret = 0;
-       u8 val, control;
-       u8 currentlyset;
-       bool changed = false;
-       u8 mask = 1 << (chan->channel2 * 2 + (dir == IIO_EV_DIR_RISING));
-
-       mutex_lock(&indio_dev->mlock);
-       /* read current control */
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_CTRL_2_ADDR,
-                                      &control);
-       if (ret)
-               goto error_ret;
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
-                                      &val);
-       if (ret < 0)
-               goto error_ret;
-       currentlyset = val & mask;
-
-       if (!currentlyset && state) {
-               changed = true;
-               val |= mask;
-       } else if (currentlyset && !state) {
-               changed = true;
-               val &= ~mask;
-       }
-
-       if (changed) {
-               ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                               LIS3L02DQ_REG_WAKE_UP_CFG_ADDR,
-                                               val);
-               if (ret)
-                       goto error_ret;
-               control = val & 0x3f ?
-                       (control | LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT) :
-                       (control & ~LIS3L02DQ_REG_CTRL_2_ENABLE_INTERRUPT);
-               ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                               LIS3L02DQ_REG_CTRL_2_ADDR,
-                                               control);
-               if (ret)
-                       goto error_ret;
-       }
-
-error_ret:
-       mutex_unlock(&indio_dev->mlock);
-       return ret;
-}
-
-static struct attribute *lis3l02dq_attributes[] = {
-       &iio_dev_attr_sampling_frequency.dev_attr.attr,
-       &iio_const_attr_sampling_frequency_available.dev_attr.attr,
-       NULL
-};
-
-static const struct attribute_group lis3l02dq_attribute_group = {
-       .attrs = lis3l02dq_attributes,
-};
-
-static const struct iio_info lis3l02dq_info = {
-       .read_raw = &lis3l02dq_read_raw,
-       .write_raw = &lis3l02dq_write_raw,
-       .read_event_value = &lis3l02dq_read_thresh,
-       .write_event_value = &lis3l02dq_write_thresh,
-       .write_event_config = &lis3l02dq_write_event_config,
-       .read_event_config = &lis3l02dq_read_event_config,
-       .driver_module = THIS_MODULE,
-       .attrs = &lis3l02dq_attribute_group,
-};
-
-static int lis3l02dq_probe(struct spi_device *spi)
-{
-       int ret;
-       struct lis3l02dq_state *st;
-       struct iio_dev *indio_dev;
-
-       indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
-       if (!indio_dev)
-               return -ENOMEM;
-       st = iio_priv(indio_dev);
-       /* this is only used for removal purposes */
-       spi_set_drvdata(spi, indio_dev);
-
-       st->us = spi;
-       st->gpio = of_get_gpio(spi->dev.of_node, 0);
-       mutex_init(&st->buf_lock);
-       indio_dev->name = spi->dev.driver->name;
-       indio_dev->dev.parent = &spi->dev;
-       indio_dev->info = &lis3l02dq_info;
-       indio_dev->channels = lis3l02dq_channels;
-       indio_dev->num_channels = ARRAY_SIZE(lis3l02dq_channels);
-
-       indio_dev->modes = INDIO_DIRECT_MODE;
-
-       ret = lis3l02dq_configure_buffer(indio_dev);
-       if (ret)
-               return ret;
-
-       if (spi->irq) {
-               ret = request_threaded_irq(st->us->irq,
-                                          &lis3l02dq_th,
-                                          &lis3l02dq_event_handler,
-                                          IRQF_TRIGGER_RISING,
-                                          "lis3l02dq",
-                                          indio_dev);
-               if (ret)
-                       goto error_unreg_buffer_funcs;
-
-               ret = lis3l02dq_probe_trigger(indio_dev);
-               if (ret)
-                       goto error_free_interrupt;
-       }
-
-       /* Get the device into a sane initial state */
-       ret = lis3l02dq_initial_setup(indio_dev);
-       if (ret)
-               goto error_remove_trigger;
-
-       ret = iio_device_register(indio_dev);
-       if (ret)
-               goto error_remove_trigger;
-
-       return 0;
-
-error_remove_trigger:
-       if (spi->irq)
-               lis3l02dq_remove_trigger(indio_dev);
-error_free_interrupt:
-       if (spi->irq)
-               free_irq(st->us->irq, indio_dev);
-error_unreg_buffer_funcs:
-       lis3l02dq_unconfigure_buffer(indio_dev);
-       return ret;
-}
-
-/* Power down the device */
-static int lis3l02dq_stop_device(struct iio_dev *indio_dev)
-{
-       int ret;
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-       u8 val = 0;
-
-       mutex_lock(&indio_dev->mlock);
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_CTRL_1_ADDR,
-                                       val);
-       if (ret) {
-               dev_err(&st->us->dev, "problem with turning device off: ctrl1");
-               goto err_ret;
-       }
-
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_CTRL_2_ADDR,
-                                       val);
-       if (ret)
-               dev_err(&st->us->dev, "problem with turning device off: ctrl2");
-err_ret:
-       mutex_unlock(&indio_dev->mlock);
-       return ret;
-}
-
-/* fixme, confirm ordering in this function */
-static int lis3l02dq_remove(struct spi_device *spi)
-{
-       struct iio_dev *indio_dev = spi_get_drvdata(spi);
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-
-       iio_device_unregister(indio_dev);
-
-       lis3l02dq_disable_all_events(indio_dev);
-       lis3l02dq_stop_device(indio_dev);
-
-       if (spi->irq)
-               free_irq(st->us->irq, indio_dev);
-
-       lis3l02dq_remove_trigger(indio_dev);
-       lis3l02dq_unconfigure_buffer(indio_dev);
-
-       return 0;
-}
-
-static struct spi_driver lis3l02dq_driver = {
-       .driver = {
-               .name = "lis3l02dq",
-       },
-       .probe = lis3l02dq_probe,
-       .remove = lis3l02dq_remove,
-};
-module_spi_driver(lis3l02dq_driver);
-
-MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
-MODULE_DESCRIPTION("ST LIS3L02DQ Accelerometer SPI driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:lis3l02dq");
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
deleted file mode 100644 (file)
index 50c162e..0000000
+++ /dev/null
@@ -1,428 +0,0 @@
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/mutex.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/kfifo_buf.h>
-#include <linux/iio/trigger.h>
-#include <linux/iio/trigger_consumer.h>
-#include "lis3l02dq.h"
-
-/**
- * combine_8_to_16() utility function to munge two u8s into u16
- **/
-static inline u16 combine_8_to_16(u8 lower, u8 upper)
-{
-       u16 _lower = lower;
-       u16 _upper = upper;
-
-       return _lower | (_upper << 8);
-}
-
-/**
- * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
- **/
-irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
-{
-       struct iio_dev *indio_dev = private;
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-
-       if (st->trigger_on) {
-               iio_trigger_poll(st->trig);
-               return IRQ_HANDLED;
-       }
-
-       return IRQ_WAKE_THREAD;
-}
-
-static const u8 read_all_tx_array[] = {
-       LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
-       LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
-       LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
-       LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
-       LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
-       LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
-};
-
-/**
- * lis3l02dq_read_all() Reads all channels currently selected
- * @indio_dev: IIO device state
- * @rx_array:  (dma capable) receive array, must be at least
- *             4*number of channels
- **/
-static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
-{
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-       struct spi_transfer *xfers;
-       struct spi_message msg;
-       int ret, i, j = 0;
-
-       xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
-                                     indio_dev->masklength) * 2,
-                       sizeof(*xfers), GFP_KERNEL);
-       if (!xfers)
-               return -ENOMEM;
-
-       mutex_lock(&st->buf_lock);
-
-       for (i = 0; i < ARRAY_SIZE(read_all_tx_array) / 4; i++)
-               if (test_bit(i, indio_dev->active_scan_mask)) {
-                       /* lower byte */
-                       xfers[j].tx_buf = st->tx + (2 * j);
-                       st->tx[2 * j] = read_all_tx_array[i * 4];
-                       st->tx[2 * j + 1] = 0;
-                       if (rx_array)
-                               xfers[j].rx_buf = rx_array + (j * 2);
-                       xfers[j].bits_per_word = 8;
-                       xfers[j].len = 2;
-                       xfers[j].cs_change = 1;
-                       j++;
-
-                       /* upper byte */
-                       xfers[j].tx_buf = st->tx + (2 * j);
-                       st->tx[2 * j] = read_all_tx_array[i * 4 + 2];
-                       st->tx[2 * j + 1] = 0;
-                       if (rx_array)
-                               xfers[j].rx_buf = rx_array + (j * 2);
-                       xfers[j].bits_per_word = 8;
-                       xfers[j].len = 2;
-                       xfers[j].cs_change = 1;
-                       j++;
-               }
-
-       /* After these are transmitted, the rx_buff should have
-        * values in alternate bytes
-        */
-       spi_message_init(&msg);
-       for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
-                                     indio_dev->masklength) * 2; j++)
-               spi_message_add_tail(&xfers[j], &msg);
-
-       ret = spi_sync(st->us, &msg);
-       mutex_unlock(&st->buf_lock);
-       kfree(xfers);
-
-       return ret;
-}
-
-static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
-                                       u8 *buf)
-{
-       int ret, i;
-       u8 *rx_array;
-       s16 *data = (s16 *)buf;
-       int scan_count = bitmap_weight(indio_dev->active_scan_mask,
-                                      indio_dev->masklength);
-
-       rx_array = kcalloc(4, scan_count, GFP_KERNEL);
-       if (!rx_array)
-               return -ENOMEM;
-       ret = lis3l02dq_read_all(indio_dev, rx_array);
-       if (ret < 0) {
-               kfree(rx_array);
-               return ret;
-       }
-       for (i = 0; i < scan_count; i++)
-               data[i] = combine_8_to_16(rx_array[i * 4 + 1],
-                                       rx_array[i * 4 + 3]);
-       kfree(rx_array);
-
-       return i * sizeof(data[0]);
-}
-
-static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
-{
-       struct iio_poll_func *pf = p;
-       struct iio_dev *indio_dev = pf->indio_dev;
-       int len = 0;
-       char *data;
-
-       data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
-       if (!data)
-               goto done;
-
-       if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
-               len = lis3l02dq_get_buffer_element(indio_dev, data);
-
-       iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
-
-       kfree(data);
-done:
-       iio_trigger_notify_done(indio_dev->trig);
-       return IRQ_HANDLED;
-}
-
-/* Caller responsible for locking as necessary. */
-static int
-__lis3l02dq_write_data_ready_config(struct iio_dev *indio_dev, bool state)
-{
-       int ret;
-       u8 valold;
-       bool currentlyset;
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-
-       /* Get the current event mask register */
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_CTRL_2_ADDR,
-                                      &valold);
-       if (ret)
-               goto error_ret;
-       /* Find out if data ready is already on */
-       currentlyset
-               = valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
-
-       /* Disable requested */
-       if (!state && currentlyset) {
-               /* Disable the data ready signal */
-               valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
-
-               /* The double write is to overcome a hardware bug? */
-               ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                               LIS3L02DQ_REG_CTRL_2_ADDR,
-                                               valold);
-               if (ret)
-                       goto error_ret;
-               ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                               LIS3L02DQ_REG_CTRL_2_ADDR,
-                                               valold);
-               if (ret)
-                       goto error_ret;
-               st->trigger_on = false;
-       /* Enable requested */
-       } else if (state && !currentlyset) {
-               /* If not set, enable requested
-                * first disable all events
-                */
-               ret = lis3l02dq_disable_all_events(indio_dev);
-               if (ret < 0)
-                       goto error_ret;
-
-               valold = ret |
-                       LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;
-
-               st->trigger_on = true;
-               ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                               LIS3L02DQ_REG_CTRL_2_ADDR,
-                                               valold);
-               if (ret)
-                       goto error_ret;
-       }
-
-       return 0;
-error_ret:
-       return ret;
-}
-
-/**
- * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
- *
- * If disabling the interrupt also does a final read to ensure it is clear.
- * This is only important in some cases where the scan enable elements are
- * switched before the buffer is reenabled.
- **/
-static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
-                                               bool state)
-{
-       struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
-       int ret = 0;
-       u8 t;
-
-       __lis3l02dq_write_data_ready_config(indio_dev, state);
-       if (!state) {
-               /*
-                * A possible quirk with the handler is currently worked around
-                * by ensuring outstanding read events are cleared.
-                */
-               ret = lis3l02dq_read_all(indio_dev, NULL);
-       }
-       lis3l02dq_spi_read_reg_8(indio_dev,
-                                LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
-                                &t);
-       return ret;
-}
-
-/**
- * lis3l02dq_trig_try_reen() try reenabling irq for data rdy trigger
- * @trig:      the datardy trigger
- */
-static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
-{
-       struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-       int i;
-
-       /* If gpio still high (or high again)
-        * In theory possible we will need to do this several times
-        */
-       for (i = 0; i < 5; i++)
-               if (gpio_get_value(st->gpio))
-                       lis3l02dq_read_all(indio_dev, NULL);
-               else
-                       break;
-       if (i == 5)
-               pr_info("Failed to clear the interrupt for lis3l02dq\n");
-
-       /* irq reenabled so success! */
-       return 0;
-}
-
-static const struct iio_trigger_ops lis3l02dq_trigger_ops = {
-       .owner = THIS_MODULE,
-       .set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state,
-       .try_reenable = &lis3l02dq_trig_try_reen,
-};
-
-int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
-{
-       int ret;
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-
-       st->trig = iio_trigger_alloc("lis3l02dq-dev%d", indio_dev->id);
-       if (!st->trig) {
-               ret = -ENOMEM;
-               goto error_ret;
-       }
-
-       st->trig->dev.parent = &st->us->dev;
-       st->trig->ops = &lis3l02dq_trigger_ops;
-       iio_trigger_set_drvdata(st->trig, indio_dev);
-       ret = iio_trigger_register(st->trig);
-       if (ret)
-               goto error_free_trig;
-
-       return 0;
-
-error_free_trig:
-       iio_trigger_free(st->trig);
-error_ret:
-       return ret;
-}
-
-void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
-{
-       struct lis3l02dq_state *st = iio_priv(indio_dev);
-
-       iio_trigger_unregister(st->trig);
-       iio_trigger_free(st->trig);
-}
-
-void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev)
-{
-       iio_dealloc_pollfunc(indio_dev->pollfunc);
-       iio_kfifo_free(indio_dev->buffer);
-}
-
-static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
-{
-       /* Disable unwanted channels otherwise the interrupt will not clear */
-       u8 t;
-       int ret;
-       bool oneenabled = false;
-
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_CTRL_1_ADDR,
-                                      &t);
-       if (ret)
-               goto error_ret;
-
-       if (test_bit(0, indio_dev->active_scan_mask)) {
-               t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
-               oneenabled = true;
-       } else {
-               t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
-       }
-       if (test_bit(1, indio_dev->active_scan_mask)) {
-               t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
-               oneenabled = true;
-       } else {
-               t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
-       }
-       if (test_bit(2, indio_dev->active_scan_mask)) {
-               t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
-               oneenabled = true;
-       } else {
-               t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
-       }
-       if (!oneenabled) /* what happens in this case is unknown */
-               return -EINVAL;
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_CTRL_1_ADDR,
-                                       t);
-       if (ret)
-               goto error_ret;
-
-       return iio_triggered_buffer_postenable(indio_dev);
-error_ret:
-       return ret;
-}
-
-/* Turn all channels on again */
-static int lis3l02dq_buffer_predisable(struct iio_dev *indio_dev)
-{
-       u8 t;
-       int ret;
-
-       ret = iio_triggered_buffer_predisable(indio_dev);
-       if (ret)
-               goto error_ret;
-
-       ret = lis3l02dq_spi_read_reg_8(indio_dev,
-                                      LIS3L02DQ_REG_CTRL_1_ADDR,
-                                      &t);
-       if (ret)
-               goto error_ret;
-       t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
-               LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
-               LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
-
-       ret = lis3l02dq_spi_write_reg_8(indio_dev,
-                                       LIS3L02DQ_REG_CTRL_1_ADDR,
-                                       t);
-
-error_ret:
-       return ret;
-}
-
-static const struct iio_buffer_setup_ops lis3l02dq_buffer_setup_ops = {
-       .postenable = &lis3l02dq_buffer_postenable,
-       .predisable = &lis3l02dq_buffer_predisable,
-};
-
-int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
-{
-       int ret;
-       struct iio_buffer *buffer;
-
-       buffer = iio_kfifo_allocate();
-       if (!buffer)
-               return -ENOMEM;
-
-       iio_device_attach_buffer(indio_dev, buffer);
-
-       buffer->scan_timestamp = true;
-       indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
-
-       /* Functions are NULL as we set handler below */
-       indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
-                                                &lis3l02dq_trigger_handler,
-                                                0,
-                                                indio_dev,
-                                                "lis3l02dq_consumer%d",
-                                                indio_dev->id);
-
-       if (!indio_dev->pollfunc) {
-               ret = -ENOMEM;
-               goto error_iio_sw_rb_free;
-       }
-
-       indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
-       return 0;
-
-error_iio_sw_rb_free:
-       iio_kfifo_free(indio_dev->buffer);
-       return ret;
-}
index ec12181822e659a508a977caf5e8acf43b7472ed..b5625f5d5e0e3ebf0962921285cd8e1740b63d4d 100644 (file)
@@ -774,7 +774,7 @@ static irqreturn_t sca3000_event_handler(int irq, void *private)
        struct iio_dev *indio_dev = private;
        struct sca3000_state *st = iio_priv(indio_dev);
        int ret, val;
-       s64 last_timestamp = iio_get_time_ns();
+       s64 last_timestamp = iio_get_time_ns(indio_dev);
 
        /*
         * Could lead if badly timed to an extra read of status reg,
@@ -1046,6 +1046,8 @@ static int sca3000_clean_setup(struct sca3000_state *st)
 
        /* Disable ring buffer */
        ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
+       if (ret < 0)
+               goto error_ret;
        ret = sca3000_write_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL,
                                     (ret & SCA3000_OUT_CTRL_PROT_MASK)
                                     | SCA3000_OUT_CTRL_BUF_X_EN
index a06b46cb81caa08e74a2728e8b1d955431a0a930..2177f1dd2b5dfc4b1c66d79f7937f21014d0c9e1 100644 (file)
@@ -705,7 +705,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
                                                        IIO_EV_DIR_RISING,
                                                        IIO_EV_TYPE_THRESH,
                                                        0, 0, 0),
-                                              iio_get_time_ns());
+                                              iio_get_time_ns(indio_dev));
                        else if (((channels[i] >> 11) & 0xFFF) <=
                                st->cell_threshlow)
                                iio_push_event(indio_dev,
@@ -715,7 +715,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
                                                        IIO_EV_DIR_FALLING,
                                                        IIO_EV_TYPE_THRESH,
                                                        0, 0, 0),
-                                              iio_get_time_ns());
+                                              iio_get_time_ns(indio_dev));
                } else {
                        if (((channels[i] >> 11) & 0xFFF) >= st->aux_threshhigh)
                                iio_push_event(indio_dev,
@@ -724,7 +724,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
                                                        0,
                                                        IIO_EV_TYPE_THRESH,
                                                        IIO_EV_DIR_RISING),
-                                              iio_get_time_ns());
+                                              iio_get_time_ns(indio_dev));
                        else if (((channels[i] >> 11) & 0xFFF) <=
                                st->aux_threshlow)
                                iio_push_event(indio_dev,
@@ -733,7 +733,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
                                                        0,
                                                        IIO_EV_TYPE_THRESH,
                                                        IIO_EV_DIR_FALLING),
-                                              iio_get_time_ns());
+                                              iio_get_time_ns(indio_dev));
                }
        }
 
index a6f8eb11242c124cc292e74594e16320bf092f9f..0572df9aad85278c9242f045fc5e766f01891ef8 100644 (file)
@@ -77,7 +77,8 @@ static void ad7606_poll_bh_to_ring(struct work_struct *work_s)
                        goto done;
        }
 
-       iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+       iio_push_to_buffers_with_timestamp(indio_dev, buf,
+                                          iio_get_time_ns(indio_dev));
 done:
        gpio_set_value(st->pdata->gpio_convst, 0);
        iio_trigger_notify_done(indio_dev->trig);
index ac3735c7f4a94a30dd2859bd6633fd7a739fb2d2..5e8115b0101159377c2c06f9669b7fe653276e97 100644 (file)
@@ -253,7 +253,8 @@ static const struct attribute_group ad7816_attribute_group = {
 
 static irqreturn_t ad7816_event_handler(int irq, void *private)
 {
-       iio_push_event(private, IIO_EVENT_CODE_AD7816_OTI, iio_get_time_ns());
+       iio_push_event(private, IIO_EVENT_CODE_AD7816_OTI,
+                      iio_get_time_ns((struct iio_dev *)private));
        return IRQ_HANDLED;
 }
 
index a10e7d8e600287ce63cfcab9375e164df8549fba..3faffe59c9336768394533faffc66847e7b01071 100644 (file)
@@ -1752,7 +1752,7 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
                if ((chip->id & ID_FAMILY_MASK) != ID_ADT75XX)
                        stat1 &= 0x1F;
 
-               time = iio_get_time_ns();
+               time = iio_get_time_ns(indio_dev);
                if (stat1 & BIT(0))
                        iio_push_event(indio_dev,
                                       IIO_UNMOD_EVENT_CODE(IIO_TEMP, 0,
@@ -1804,7 +1804,7 @@ static irqreturn_t adt7316_event_handler(int irq, void *private)
                                                            0,
                                                            IIO_EV_TYPE_THRESH,
                                                            IIO_EV_DIR_RISING),
-                                      iio_get_time_ns());
+                                      iio_get_time_ns(indio_dev));
        }
 
        return IRQ_HANDLED;
index f6b9a10326ea450a9873a077f09a2008c1b1cc1d..5578a077fcfbd2699209530fa9c1a1605bb0007c 100644 (file)
@@ -493,7 +493,7 @@ static irqreturn_t ad7150_event_handler(int irq, void *private)
        struct iio_dev *indio_dev = private;
        struct ad7150_chip_info *chip = iio_priv(indio_dev);
        u8 int_status;
-       s64 timestamp = iio_get_time_ns();
+       s64 timestamp = iio_get_time_ns(indio_dev);
        int ret;
 
        ret = i2c_smbus_read_byte_data(chip->client, AD7150_STATUS);
index d553c8e18fccf7c71a370a12626a0260774cff05..ea15bc1c300cf2fa1aa60bef5ffb2cc4caa0f438 100644 (file)
@@ -1554,7 +1554,7 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
 {
        struct iio_dev *indio_dev = private;
        struct tsl2X7X_chip *chip = iio_priv(indio_dev);
-       s64 timestamp = iio_get_time_ns();
+       s64 timestamp = iio_get_time_ns(indio_dev);
        int ret;
        u8 value;
 
diff --git a/drivers/staging/ks7010/Kconfig b/drivers/staging/ks7010/Kconfig
new file mode 100644 (file)
index 0000000..0b92176
--- /dev/null
@@ -0,0 +1,10 @@
+config KS7010
+       tristate "KeyStream KS7010 SDIO support"
+       depends on MMC && WIRELESS
+       select WIRELESS_EXT
+       select WEXT_PRIV
+       select FW_LOADER
+       help
+         This is a driver for KeyStream KS7010 based SDIO WIFI cards. It is
+         found on at least later Spectec SDW-821 (FCC-ID "S2Y-WLAN-11G-K" only,
+         sadly not FCC-ID "S2Y-WLAN-11B-G") and Spectec SDW-823 microSD cards.
diff --git a/drivers/staging/ks7010/Makefile b/drivers/staging/ks7010/Makefile
new file mode 100644 (file)
index 0000000..69fcf8d
--- /dev/null
@@ -0,0 +1,4 @@
+obj-$(CONFIG_KS7010) += ks7010.o
+
+ccflags-y           += -DKS_WLAN_DEBUG=0
+ks7010-y            := michael_mic.o ks_hostif.o ks_wlan_net.o ks7010_sdio.o
diff --git a/drivers/staging/ks7010/TODO b/drivers/staging/ks7010/TODO
new file mode 100644 (file)
index 0000000..2938d35
--- /dev/null
@@ -0,0 +1,36 @@
+KS7010 Linux driver
+===================
+
+This driver is based on source code from the Ben Nanonote extra repository [1]
+which is based on the original v007 release from Renesas [2]. Some more
+background info about the chipset can be found here [3] and here [4]. Thank
+you to all which already participated in cleaning up the driver so far!
+
+[1] http://projects.qi-hardware.com/index.php/p/openwrt-packages/source/tree/master/ks7010/src
+[2] http://downloads.qi-hardware.com/software/ks7010_sdio_v007.tar.bz2
+[3] http://en.qi-hardware.com/wiki/Ben_NanoNote_Wi-Fi
+[4] https://wikidevi.com/wiki/Renesas
+
+TODO
+----
+
+First a few words what not to do (at least not blindly):
+
+- don't be overly strict with the 80 char limit. Only if it REALLY makes the
+  code more readable
+- No '#if 0/1' removal unless the surrounding code is understood and removal is
+  really OK. There might be some hints hidden there.
+
+Now the TODOs:
+
+- fix codechecker warnings (checkpatch, sparse, smatch). But PLEASE make sure
+  that you are not only silencing the warning but really fixing code. You
+  should understand the change you submit.
+- fix the 'card removal' event when card is inserted when booting
+- check what other upstream wireless mechanisms can be used instead of the
+  custom ones here
+
+Please send any patches to:
+Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Wolfram Sang <wsa@the-dreams.de>
+Linux Driver Project Developer List <driverdev-devel@linuxdriverproject.org>
diff --git a/drivers/staging/ks7010/eap_packet.h b/drivers/staging/ks7010/eap_packet.h
new file mode 100644 (file)
index 0000000..16a392a
--- /dev/null
@@ -0,0 +1,129 @@
+#ifndef EAP_PACKET_H
+#define EAP_PACKET_H
+
+#define WBIT(n) (1 << (n))
+
+#ifndef ETH_ALEN
+#define ETH_ALEN 6
+#endif
+
+struct ether_hdr {
+       unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+       unsigned char h_source[ETH_ALEN];       /* source ether addr    */
+       unsigned char h_dest_snap;
+       unsigned char h_source_snap;
+       unsigned char h_command;
+       unsigned char h_vendor_id[3];
+       unsigned short h_proto; /* packet type ID field */
+#define ETHER_PROTOCOL_TYPE_EAP                0x888e
+#define ETHER_PROTOCOL_TYPE_IP         0x0800
+#define ETHER_PROTOCOL_TYPE_ARP                0x0806
+       /* followed by length octets of data */
+} __attribute__ ((packed));
+
+struct ieee802_1x_hdr {
+       unsigned char version;
+       unsigned char type;
+       unsigned short length;
+       /* followed by length octets of data */
+} __attribute__ ((packed));
+
+#define EAPOL_VERSION 2
+
+enum { IEEE802_1X_TYPE_EAP_PACKET = 0,
+       IEEE802_1X_TYPE_EAPOL_START = 1,
+       IEEE802_1X_TYPE_EAPOL_LOGOFF = 2,
+       IEEE802_1X_TYPE_EAPOL_KEY = 3,
+       IEEE802_1X_TYPE_EAPOL_ENCAPSULATED_ASF_ALERT = 4
+};
+
+enum { EAPOL_KEY_TYPE_RC4 = 1, EAPOL_KEY_TYPE_RSN = 2,
+       EAPOL_KEY_TYPE_WPA = 254
+};
+
+#define IEEE8021X_REPLAY_COUNTER_LEN 8
+#define IEEE8021X_KEY_SIGN_LEN 16
+#define IEEE8021X_KEY_IV_LEN 16
+
+#define IEEE8021X_KEY_INDEX_FLAG 0x80
+#define IEEE8021X_KEY_INDEX_MASK 0x03
+
+struct ieee802_1x_eapol_key {
+       unsigned char type;
+       unsigned short key_length;
+       /* does not repeat within the life of the keying material used to
+        * encrypt the Key field; 64-bit NTP timestamp MAY be used here */
+       unsigned char replay_counter[IEEE8021X_REPLAY_COUNTER_LEN];
+       unsigned char key_iv[IEEE8021X_KEY_IV_LEN];     /* cryptographically random number */
+       unsigned char key_index;        /* key flag in the most significant bit:
+                                        * 0 = broadcast (default key),
+                                        * 1 = unicast (key mapping key); key index is in the
+                                        * 7 least significant bits */
+       /* HMAC-MD5 message integrity check computed with MS-MPPE-Send-Key as
+        * the key */
+       unsigned char key_signature[IEEE8021X_KEY_SIGN_LEN];
+
+       /* followed by key: if packet body length = 44 + key length, then the
+        * key field (of key_length bytes) contains the key in encrypted form;
+        * if packet body length = 44, key field is absent and key_length
+        * represents the number of least significant octets from
+        * MS-MPPE-Send-Key attribute to be used as the keying material;
+        * RC4 key used in encryption = Key-IV + MS-MPPE-Recv-Key */
+} __attribute__ ((packed));
+
+#define WPA_NONCE_LEN 32
+#define WPA_REPLAY_COUNTER_LEN 8
+
+struct wpa_eapol_key {
+       unsigned char type;
+       unsigned short key_info;
+       unsigned short key_length;
+       unsigned char replay_counter[WPA_REPLAY_COUNTER_LEN];
+       unsigned char key_nonce[WPA_NONCE_LEN];
+       unsigned char key_iv[16];
+       unsigned char key_rsc[8];
+       unsigned char key_id[8];        /* Reserved in IEEE 802.11i/RSN */
+       unsigned char key_mic[16];
+       unsigned short key_data_length;
+       /* followed by key_data_length bytes of key_data */
+} __attribute__ ((packed));
+
+#define WPA_KEY_INFO_TYPE_MASK (WBIT(0) | WBIT(1) | WBIT(2))
+#define WPA_KEY_INFO_TYPE_HMAC_MD5_RC4 WBIT(0)
+#define WPA_KEY_INFO_TYPE_HMAC_SHA1_AES WBIT(1)
+#define WPA_KEY_INFO_KEY_TYPE WBIT(3)  /* 1 = Pairwise, 0 = Group key */
+/* bit4..5 is used in WPA, but is reserved in IEEE 802.11i/RSN */
+#define WPA_KEY_INFO_KEY_INDEX_MASK (WBIT(4) | WBIT(5))
+#define WPA_KEY_INFO_KEY_INDEX_SHIFT 4
+#define WPA_KEY_INFO_INSTALL WBIT(6)   /* pairwise */
+#define WPA_KEY_INFO_TXRX WBIT(6)      /* group */
+#define WPA_KEY_INFO_ACK WBIT(7)
+#define WPA_KEY_INFO_MIC WBIT(8)
+#define WPA_KEY_INFO_SECURE WBIT(9)
+#define WPA_KEY_INFO_ERROR WBIT(10)
+#define WPA_KEY_INFO_REQUEST WBIT(11)
+#define WPA_KEY_INFO_ENCR_KEY_DATA WBIT(12)    /* IEEE 802.11i/RSN only */
+
+#define WPA_CAPABILITY_PREAUTH WBIT(0)
+
+#define GENERIC_INFO_ELEM 0xdd
+#define RSN_INFO_ELEM 0x30
+
+enum {
+       REASON_UNSPECIFIED = 1,
+       REASON_DEAUTH_LEAVING = 3,
+       REASON_INVALID_IE = 13,
+       REASON_MICHAEL_MIC_FAILURE = 14,
+       REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
+       REASON_GROUP_KEY_UPDATE_TIMEOUT = 16,
+       REASON_IE_IN_4WAY_DIFFERS = 17,
+       REASON_GROUP_CIPHER_NOT_VALID = 18,
+       REASON_PAIRWISE_CIPHER_NOT_VALID = 19,
+       REASON_AKMP_NOT_VALID = 20,
+       REASON_UNSUPPORTED_RSN_IE_VERSION = 21,
+       REASON_INVALID_RSN_IE_CAPAB = 22,
+       REASON_IEEE_802_1X_AUTH_FAILED = 23,
+       REASON_CIPHER_SUITE_REJECTED = 24
+};
+
+#endif /* EAP_PACKET_H */
diff --git a/drivers/staging/ks7010/ks7010_sdio.c b/drivers/staging/ks7010/ks7010_sdio.c
new file mode 100644 (file)
index 0000000..b7337fd
--- /dev/null
@@ -0,0 +1,1236 @@
+/*
+ *   Driver for KeyStream, KS7010 based SDIO cards.
+ *
+ *   Copyright (C) 2006-2008 KeyStream Corp.
+ *   Copyright (C) 2009 Renesas Technology Corp.
+ *   Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License version 2 as
+ *   published by the Free Software Foundation.
+ */
+
+#include <linux/firmware.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/workqueue.h>
+#include <asm/atomic.h>
+
+#include "ks_wlan.h"
+#include "ks_wlan_ioctl.h"
+#include "ks_hostif.h"
+#include "ks7010_sdio.h"
+
+#define KS7010_FUNC_NUM 1
+#define KS7010_IO_BLOCK_SIZE 512
+#define KS7010_MAX_CLOCK 25000000
+
+/* SDIO vendor/device IDs this driver binds to; the KS7010 part was
+ * shipped under two different vendor codes.
+ */
+static const struct sdio_device_id ks7010_sdio_ids[] = {
+       {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_A, SDIO_DEVICE_ID_KS_7010)},
+       {SDIO_DEVICE(SDIO_VENDOR_ID_KS_CODE_B, SDIO_DEVICE_ID_KS_7010)},
+       { /* all zero */ }
+};
+MODULE_DEVICE_TABLE(sdio, ks7010_sdio_ids);
+
+/* macro */
+
+/* Circular-buffer index helpers for the tx/rx device queues.
+ * inc_*q{head,tail}() advance an index with wrap-around; cnt_*qbody()
+ * yields the number of occupied slots — the "+ BUFF_SIZE" keeps the
+ * modulo arithmetic non-negative after qtail has wrapped past qhead.
+ */
+#define inc_txqhead(priv) \
+        ( priv->tx_dev.qhead = (priv->tx_dev.qhead + 1) % TX_DEVICE_BUFF_SIZE )
+#define inc_txqtail(priv) \
+        ( priv->tx_dev.qtail = (priv->tx_dev.qtail + 1) % TX_DEVICE_BUFF_SIZE )
+#define cnt_txqbody(priv) \
+        (((priv->tx_dev.qtail + TX_DEVICE_BUFF_SIZE) - (priv->tx_dev.qhead)) % TX_DEVICE_BUFF_SIZE )
+
+#define inc_rxqhead(priv) \
+        ( priv->rx_dev.qhead = (priv->rx_dev.qhead + 1) % RX_DEVICE_BUFF_SIZE )
+#define inc_rxqtail(priv) \
+        ( priv->rx_dev.qtail = (priv->rx_dev.qtail + 1) % RX_DEVICE_BUFF_SIZE )
+#define cnt_rxqbody(priv) \
+        (((priv->rx_dev.qtail + RX_DEVICE_BUFF_SIZE) - (priv->rx_dev.qhead)) % RX_DEVICE_BUFF_SIZE )
+
+/* Read @length bytes from SDIO function-1 register @address into @buffer.
+ * A single byte uses CMD52 (sdio_readb); anything longer uses a CMD53
+ * block transfer.  Returns 0 on success or the SDIO core error code.
+ * NOTE(review): caller is expected to hold the SDIO host — confirm.
+ */
+static int ks7010_sdio_read(struct ks_wlan_private *priv, unsigned int address,
+                           unsigned char *buffer, int length)
+{
+       struct ks_sdio_card *card;
+       int rc;
+
+       card = priv->ks_wlan_hw.sdio_card;
+
+       if (length == 1)        /* CMD52 */
+               *buffer = sdio_readb(card->func, address, &rc);
+       else    /* CMD53 multi-block transfer */
+               rc = sdio_memcpy_fromio(card->func, buffer, address, length);
+
+       if (rc != 0)
+               DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
+
+       return rc;
+}
+
+/* Write @length bytes from @buffer to SDIO function-1 register @address.
+ * A single byte uses CMD52 (sdio_writeb); anything longer uses a CMD53
+ * block transfer.  Returns 0 on success or the SDIO core error code.
+ */
+static int ks7010_sdio_write(struct ks_wlan_private *priv, unsigned int address,
+                            unsigned char *buffer, int length)
+{
+       struct ks_sdio_card *card;
+       int rc;
+
+       card = priv->ks_wlan_hw.sdio_card;
+
+       if (length == 1)        /* CMD52 */
+               sdio_writeb(card->func, *buffer, (unsigned int)address, &rc);
+       else    /* CMD53 */
+               rc = sdio_memcpy_toio(card->func, (unsigned int)address, buffer,
+                                     length);
+
+       if (rc != 0)
+               DPRINTK(1, "sdio error=%d size=%d\n", rc, length);
+
+       return rc;
+}
+
+/* Service a pending doze request: if the device is currently awake
+ * (sleepstatus.status == 0), write GCR_B_DOZE to put the firmware to
+ * sleep, record the doze timestamp, and mirror the new state into
+ * priv->sleep_mode.  The request flag is cleared unconditionally.
+ */
+void ks_wlan_hw_sleep_doze_request(struct ks_wlan_private *priv)
+{
+       unsigned char rw_data;
+       int retval;
+
+       DPRINTK(4, "\n");
+
+       /* clear request */
+       atomic_set(&priv->sleepstatus.doze_request, 0);
+
+       if (atomic_read(&priv->sleepstatus.status) == 0) {
+               rw_data = GCR_B_DOZE;
+               retval =
+                   ks7010_sdio_write(priv, GCR_B, &rw_data, sizeof(rw_data));
+               if (retval) {
+                       /* write failed: leave status untouched (still awake) */
+                       DPRINTK(1, " error : GCR_B=%02X\n", rw_data);
+                       goto out;
+               }
+               DPRINTK(4, "PMG SET!! : GCR_B=%02X\n", rw_data);
+               DPRINTK(3, "sleep_mode=SLP_SLEEP\n");
+               atomic_set(&priv->sleepstatus.status, 1);
+               priv->last_doze = jiffies;
+       } else {
+               DPRINTK(1, "sleep_mode=%d\n", priv->sleep_mode);
+       }
+
+ out:
+       /* keep the cached sleep_mode in sync with the atomic state */
+       priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
+       return;
+}
+
+/* Service a pending wakeup request: if the device is dozing
+ * (sleepstatus.status == 1), write WAKEUP_REQ to wake the firmware,
+ * record the wakeup timestamp and count, and mirror the new state into
+ * priv->sleep_mode.  The request flag is cleared unconditionally.
+ */
+void ks_wlan_hw_sleep_wakeup_request(struct ks_wlan_private *priv)
+{
+       unsigned char rw_data;
+       int retval;
+
+       DPRINTK(4, "\n");
+
+       /* clear request */
+       atomic_set(&priv->sleepstatus.wakeup_request, 0);
+
+       if (atomic_read(&priv->sleepstatus.status) == 1) {
+               rw_data = WAKEUP_REQ;
+               retval =
+                   ks7010_sdio_write(priv, WAKEUP, &rw_data, sizeof(rw_data));
+               if (retval) {
+                       /* write failed: leave status untouched (still dozing) */
+                       DPRINTK(1, " error : WAKEUP=%02X\n", rw_data);
+                       goto out;
+               }
+               DPRINTK(4, "wake up : WAKEUP=%02X\n", rw_data);
+               atomic_set(&priv->sleepstatus.status, 0);
+               priv->last_wakeup = jiffies;
+               ++priv->wakeup_count;
+       } else {
+               DPRINTK(1, "sleep_mode=%d\n", priv->sleep_mode);
+       }
+
+ out:
+       /* keep the cached sleep_mode in sync with the atomic state */
+       priv->sleep_mode = atomic_read(&priv->sleepstatus.status);
+       return;
+}
+
+/* Request a power-save wakeup while in PS_SNOOZE: write WAKEUP_REQ and
+ * record the wakeup timestamp/count.  No-op outside PS_SNOOZE.
+ * NOTE(review): on a failed write this falls through and still logs
+ * success and bumps the counters — likely a missing early return;
+ * confirm intended behavior before changing.
+ */
+void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv)
+{
+       unsigned char rw_data;
+       int retval;
+
+       DPRINTK(4, "\n");
+       if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+               rw_data = WAKEUP_REQ;
+               retval =
+                   ks7010_sdio_write(priv, WAKEUP, &rw_data, sizeof(rw_data));
+               if (retval) {
+                       DPRINTK(1, " error : WAKEUP=%02X\n", rw_data);
+               }
+               DPRINTK(4, "wake up : WAKEUP=%02X\n", rw_data);
+               priv->last_wakeup = jiffies;
+               ++priv->wakeup_count;
+       } else {
+               DPRINTK(1, "psstatus=%d\n",
+                       atomic_read(&priv->psstatus.status));
+       }
+}
+
+/* Try to enter power-save snooze.  Only acts when power management is
+ * enabled, the device is associated in infrastructure mode, and the
+ * driver state is DEVICE_STATE_SLEEP.  Entering PS_SNOOZE requires no
+ * pending confirms, no snooze guard, an empty tx queue, and no pending
+ * interrupt — otherwise the rw workqueue is re-armed to retry later.
+ * Always returns 0.
+ */
+int _ks_wlan_hw_power_save(struct ks_wlan_private *priv)
+{
+       int rc = 0;
+       unsigned char rw_data;
+       int retval;
+
+       if (priv->reg.powermgt == POWMGT_ACTIVE_MODE)
+               return rc;
+
+       if (priv->reg.operation_mode == MODE_INFRASTRUCTURE &&
+           (priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+
+               //DPRINTK(1,"psstatus.status=%d\n",atomic_read(&priv->psstatus.status));
+               if (priv->dev_state == DEVICE_STATE_SLEEP) {
+                       switch (atomic_read(&priv->psstatus.status)) {
+                       case PS_SNOOZE: /* 4 */
+                               /* already snoozing: nothing to do */
+                               break;
+                       default:
+                               DPRINTK(5, "\npsstatus.status=%d\npsstatus.confirm_wait=%d\npsstatus.snooze_guard=%d\ncnt_txqbody=%d\n",
+                                       atomic_read(&priv->psstatus.status),
+                                       atomic_read(&priv->psstatus.confirm_wait),
+                                       atomic_read(&priv->psstatus.snooze_guard),
+                                       cnt_txqbody(priv));
+
+                               /* only doze when fully idle */
+                               if (!atomic_read(&priv->psstatus.confirm_wait)
+                                   && !atomic_read(&priv->psstatus.snooze_guard)
+                                   && !cnt_txqbody(priv)) {
+                                       retval =
+                                           ks7010_sdio_read(priv, INT_PENDING,
+                                                            &rw_data,
+                                                            sizeof(rw_data));
+                                       if (retval) {
+                                               DPRINTK(1,
+                                                       " error : INT_PENDING=%02X\n",
+                                                       rw_data);
+                                               /* retry via workqueue */
+                                               queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+                                                                  &priv->ks_wlan_hw.rw_wq, 1);
+                                               break;
+                                       }
+                                       if (!rw_data) {
+                                               /* no interrupt pending: doze */
+                                               rw_data = GCR_B_DOZE;
+                                               retval =
+                                                   ks7010_sdio_write(priv,
+                                                                     GCR_B,
+                                                                     &rw_data,
+                                                                     sizeof(rw_data));
+                                               if (retval) {
+                                                       DPRINTK(1,
+                                                               " error : GCR_B=%02X\n",
+                                                               rw_data);
+                                                       queue_delayed_work
+                                                           (priv->ks_wlan_hw.ks7010sdio_wq,
+                                                            &priv->ks_wlan_hw.rw_wq, 1);
+                                                       break;
+                                               }
+                                               DPRINTK(4,
+                                                       "PMG SET!! : GCR_B=%02X\n",
+                                                       rw_data);
+                                               atomic_set(&priv->psstatus.
+                                                          status, PS_SNOOZE);
+                                               DPRINTK(3,
+                                                       "psstatus.status=PS_SNOOZE\n");
+                                       } else {
+                                               /* interrupt pending: retry later */
+                                               queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+                                                                  &priv->ks_wlan_hw.rw_wq, 1);
+                                       }
+                               } else {
+                                       /* not idle yet: poll again immediately */
+                                       queue_delayed_work(priv->ks_wlan_hw.
+                                                          ks7010sdio_wq,
+                                                          &priv->ks_wlan_hw.rw_wq,
+                                                          0);
+                               }
+                               break;
+                       }
+               }
+
+       }
+
+       return rc;
+}
+
+/* Public entry point: defer power-save processing to the rw workqueue
+ * (which eventually calls _ks_wlan_hw_power_save).  Always returns 0.
+ */
+int ks_wlan_hw_power_save(struct ks_wlan_private *priv)
+{
+       queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+                          &priv->ks_wlan_hw.rw_wq, 1);
+       return 0;
+}
+
+/* Append a tx buffer to the circular tx queue.  Takes ownership of @p:
+ * on rejection (device not booted, queue full) @p is freed and the
+ * completion handler is invoked before returning 1.  Returns 0 when
+ * queued.  Caller holds tx_dev_lock (see ks_wlan_hw_tx).
+ */
+static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
+                        unsigned long size,
+                        void (*complete_handler) (void *arg1, void *arg2),
+                        void *arg1, void *arg2)
+{
+       struct tx_device_buffer *sp;
+
+       if (priv->dev_state < DEVICE_STATE_BOOT) {
+               kfree(p);
+               if (complete_handler != NULL)
+                       (*complete_handler) (arg1, arg2);
+               return 1;
+       }
+
+       if ((TX_DEVICE_BUFF_SIZE - 1) <= cnt_txqbody(priv)) {
+               /* in case of buffer overflow */
+               DPRINTK(1, "tx buffer overflow\n");
+               kfree(p);
+               if (complete_handler != NULL)
+                       (*complete_handler) (arg1, arg2);
+               return 1;
+       }
+
+       sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qtail];
+       sp->sendp = p;
+       sp->size = size;
+       sp->complete_handler = complete_handler;
+       sp->arg1 = arg1;
+       sp->arg2 = arg2;
+       inc_txqtail(priv);
+
+       return 0;
+}
+
+/* Write one host-interface frame to the device's data window and flag
+ * WRITE_STATUS busy so the firmware consumes it.  Returns 0 on success
+ * (including a silently dropped frame with an out-of-range event id),
+ * -4 on data-window write failure, -3 on status write failure.
+ * NOTE(review): the negative magic codes are only logged by callers.
+ */
+static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
+                          unsigned long size)
+{
+       int rc, retval;
+       unsigned char rw_data;
+       struct hostif_hdr *hdr;
+       hdr = (struct hostif_hdr *)buffer;
+       rc = 0;
+
+       DPRINTK(4, "size=%d\n", hdr->size);
+       /* sanity-check the event id before touching the hardware */
+       if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
+               DPRINTK(1, "unknown event=%04X\n", hdr->event);
+               return 0;
+       }
+
+       retval = ks7010_sdio_write(priv, DATA_WINDOW, buffer, size);
+       if (retval) {
+               DPRINTK(1, " write error : retval=%d\n", retval);
+               return -4;
+       }
+
+       rw_data = WRITE_STATUS_BUSY;
+       retval =
+           ks7010_sdio_write(priv, WRITE_STATUS, &rw_data, sizeof(rw_data));
+       if (retval) {
+               DPRINTK(1, " error : WRITE_STATUS=%02X\n", rw_data);
+               return -3;
+       }
+
+       return 0;
+}
+
+/* Drain one entry from the tx queue: write it to the device (unless the
+ * device is snoozing), free the buffer, run its completion handler, and
+ * re-arm the workqueue if more entries remain.  On write failure the
+ * entry stays queued and a retry is scheduled.
+ */
+static void tx_device_task(void *dev)
+{
+       struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+       struct tx_device_buffer *sp;
+       int rc = 0;
+
+       DPRINTK(4, "\n");
+       if (cnt_txqbody(priv) > 0
+           && atomic_read(&priv->psstatus.status) != PS_SNOOZE) {
+               sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
+               if (priv->dev_state >= DEVICE_STATE_BOOT) {
+                       rc = write_to_device(priv, sp->sendp, sp->size);
+                       if (rc) {
+                               DPRINTK(1, "write_to_device error !!(%d)\n",
+                                       rc);
+                               /* keep the entry queued; retry later */
+                               queue_delayed_work(priv->ks_wlan_hw.
+                                                  ks7010sdio_wq,
+                                                  &priv->ks_wlan_hw.rw_wq, 1);
+                               return;
+                       }
+
+               }
+               kfree(sp->sendp);       /* allocated memory free */
+               if (sp->complete_handler != NULL)       /* TX Complete */
+                       (*sp->complete_handler) (sp->arg1, sp->arg2);
+               inc_txqhead(priv);
+
+               if (cnt_txqbody(priv) > 0) {
+                       queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+                                          &priv->ks_wlan_hw.rw_wq, 0);
+               }
+       }
+       return;
+}
+
+/* Queue a host-interface frame for transmission.  Validates the event
+ * id, records it in the hostt ring for debugging, enqueues the frame
+ * under tx_dev_lock (ownership of @p passes to the queue — see
+ * enqueue_txdev), and kicks the rw workqueue.  Returns enqueue_txdev's
+ * result: 0 queued, 1 rejected (buffer already freed).
+ */
+int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p, unsigned long size,
+                 void (*complete_handler) (void *arg1, void *arg2),
+                 void *arg1, void *arg2)
+{
+       int result = 0;
+       struct hostif_hdr *hdr;
+       hdr = (struct hostif_hdr *)p;
+
+       if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
+               DPRINTK(1, "unknown event=%04X\n", hdr->event);
+               return 0;
+       }
+
+       /* add event to hostt buffer */
+       priv->hostt.buff[priv->hostt.qtail] = hdr->event;
+       priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
+
+       DPRINTK(4, "event=%04X\n", hdr->event);
+       spin_lock(&priv->tx_dev.tx_dev_lock);
+       result = enqueue_txdev(priv, p, size, complete_handler, arg1, arg2);
+       spin_unlock(&priv->tx_dev.tx_dev_lock);
+
+       if (cnt_txqbody(priv) > 0) {
+               queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+                                  &priv->ks_wlan_hw.rw_wq, 0);
+       }
+       return result;
+}
+
+/* Tasklet body (signature per tasklet_init): hand one queued rx buffer
+ * to hostif_receive() and reschedule itself while entries remain.
+ */
+static void rx_event_task(unsigned long dev)
+{
+       struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+       struct rx_device_buffer *rp;
+
+       DPRINTK(4, "\n");
+
+       if (cnt_rxqbody(priv) > 0 && priv->dev_state >= DEVICE_STATE_BOOT) {
+               rp = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qhead];
+               hostif_receive(priv, rp->data, rp->size);
+               inc_rxqhead(priv);
+
+               if (cnt_rxqbody(priv) > 0) {
+                       /* more buffered frames: run again */
+                       tasklet_schedule(&priv->ks_wlan_hw.rx_bh_task);
+               }
+       }
+
+       return;
+}
+
+/* Pull @size bytes from the device's data window into the next rx queue
+ * slot, acknowledge via READ_STATUS_IDLE, account for a power-save
+ * confirm if this frame is one, and schedule the rx tasklet.  Frames
+ * with an implausible size (0 or > 2046) are dropped after the ack.
+ */
+static void ks_wlan_hw_rx(void *dev, uint16_t size)
+{
+       struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+       int retval;
+       struct rx_device_buffer *rx_buffer;
+       struct hostif_hdr *hdr;
+       unsigned char read_status;
+       unsigned short event = 0;
+
+       DPRINTK(4, "\n");
+
+       /* receive data */
+       if (cnt_rxqbody(priv) >= (RX_DEVICE_BUFF_SIZE - 1)) {
+               /* in case of buffer overflow */
+               DPRINTK(1, "rx buffer overflow \n");
+               goto error_out;
+       }
+       rx_buffer = &priv->rx_dev.rx_dev_buff[priv->rx_dev.qtail];
+
+       /* transfer length is padded up to the alignment the device needs */
+       retval =
+           ks7010_sdio_read(priv, DATA_WINDOW, &rx_buffer->data[0],
+                            hif_align_size(size));
+       if (retval) {
+               goto error_out;
+       }
+
+       /* length check */
+       if (size > 2046 || size == 0) {
+#ifdef KS_WLAN_DEBUG
+               if (KS_WLAN_DEBUG > 5)
+                       print_hex_dump_bytes("INVALID DATA dump: ",
+                                            DUMP_PREFIX_OFFSET,
+                                            rx_buffer->data, 32);
+#endif
+               /* rx_status update */
+               read_status = READ_STATUS_IDLE;
+               retval =
+                   ks7010_sdio_write(priv, READ_STATUS, &read_status,
+                                     sizeof(read_status));
+               if (retval) {
+                       DPRINTK(1, " error : READ_STATUS=%02X\n", read_status);
+               }
+               goto error_out;
+       }
+
+       /* commit the frame: record its real size and advance the queue */
+       hdr = (struct hostif_hdr *)&rx_buffer->data[0];
+       rx_buffer->size = le16_to_cpu(hdr->size) + sizeof(hdr->size);
+       event = hdr->event;
+       inc_rxqtail(priv);
+
+       /* read status update */
+       read_status = READ_STATUS_IDLE;
+       retval =
+           ks7010_sdio_write(priv, READ_STATUS, &read_status,
+                             sizeof(read_status));
+       if (retval) {
+               DPRINTK(1, " error : READ_STATUS=%02X\n", read_status);
+       }
+       DPRINTK(4, "READ_STATUS=%02X\n", read_status);
+
+       /* a confirm frame releases one waiter in the power-save handshake */
+       if (atomic_read(&priv->psstatus.confirm_wait)) {
+               if (IS_HIF_CONF(event)) {
+                       DPRINTK(4, "IS_HIF_CONF true !!\n");
+                       atomic_dec(&priv->psstatus.confirm_wait);
+               }
+       }
+
+       /* rx_event_task((void *)priv); */
+       tasklet_schedule(&priv->ks_wlan_hw.rx_bh_task);
+
+ error_out:
+       return;
+}
+
+/* Delayed-work body driving all host<->device traffic: enforces 30 ms
+ * settle times after doze/wakeup transitions, services sleep-mode
+ * doze/wakeup requests, then polls WSTATUS_RSIZE to schedule pending
+ * reads and writes, and finally attempts power-save entry.  The SDIO
+ * host is claimed for the whole hardware-access section.
+ */
+static void ks7010_rw_function(struct work_struct *work)
+{
+       struct hw_info_t *hw;
+       struct ks_wlan_private *priv;
+       unsigned char rw_data;
+       int retval;
+
+       hw = container_of(work, struct hw_info_t, rw_wq.work);
+       priv = container_of(hw, struct ks_wlan_private, ks_wlan_hw);
+
+       DPRINTK(4, "\n");
+
+       /* wait after DOZE */
+       if (time_after(priv->last_doze + ((30 * HZ) / 1000), jiffies)) {
+               DPRINTK(4, "wait after DOZE \n");
+               queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+                                  &priv->ks_wlan_hw.rw_wq, 1);
+               return;
+       }
+
+       /* wait after WAKEUP */
+       while (time_after(priv->last_wakeup + ((30 * HZ) / 1000), jiffies)) {
+               DPRINTK(4, "wait after WAKEUP \n");
+/*             queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,&priv->ks_wlan_hw.rw_wq,
+               (priv->last_wakeup + ((30*HZ)/1000) - jiffies));*/
+               printk("wake: %lu %lu\n", priv->last_wakeup + (30 * HZ) / 1000,
+                      jiffies);
+               msleep(30);
+       }
+
+       sdio_claim_host(priv->ks_wlan_hw.sdio_card->func);
+
+       /* power save wakeup */
+       if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+               if (cnt_txqbody(priv) > 0) {
+                       ks_wlan_hw_wakeup_request(priv);
+                       queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+                                          &priv->ks_wlan_hw.rw_wq, 1);
+               }
+               goto err_out;
+       }
+
+       /* sleep mode doze */
+       if (atomic_read(&priv->sleepstatus.doze_request) == 1) {
+               ks_wlan_hw_sleep_doze_request(priv);
+               goto err_out;
+       }
+       /* sleep mode wakeup */
+       if (atomic_read(&priv->sleepstatus.wakeup_request) == 1) {
+               ks_wlan_hw_sleep_wakeup_request(priv);
+               goto err_out;
+       }
+
+       /* read (WriteStatus/ReadDataSize FN1:00_0014) */
+       retval =
+           ks7010_sdio_read(priv, WSTATUS_RSIZE, &rw_data, sizeof(rw_data));
+       if (retval) {
+               DPRINTK(1, " error : WSTATUS_RSIZE=%02X psstatus=%d\n", rw_data,
+                       atomic_read(&priv->psstatus.status));
+               goto err_out;
+       }
+       DPRINTK(4, "WSTATUS_RSIZE=%02X\n", rw_data);
+
+       /* low nibble (x16 bytes) is pending read size, high nibble is
+        * write status */
+       if (rw_data & RSIZE_MASK) {     /* Read schedule */
+               ks_wlan_hw_rx((void *)priv,
+                             (uint16_t) (((rw_data & RSIZE_MASK) << 4)));
+       }
+       if ((rw_data & WSTATUS_MASK)) {
+               tx_device_task((void *)priv);
+       }
+       _ks_wlan_hw_power_save(priv);
+
+ err_out:
+       sdio_release_host(priv->ks_wlan_hw.sdio_card->func);
+
+       return;
+}
+
+static void ks_sdio_interrupt(struct sdio_func *func)
+{
+       int retval;
+       struct ks_sdio_card *card;
+       struct ks_wlan_private *priv;
+       unsigned char status, rsize, rw_data;
+
+       card = sdio_get_drvdata(func);
+       priv = card->priv;
+       DPRINTK(4, "\n");
+
+       if (priv->dev_state >= DEVICE_STATE_BOOT) {
+               retval =
+                   ks7010_sdio_read(priv, INT_PENDING, &status,
+                                    sizeof(status));
+               if (retval) {
+                       DPRINTK(1, "read INT_PENDING Failed!!(%d)\n", retval);
+                       goto intr_out;
+               }
+               DPRINTK(4, "INT_PENDING=%02X\n", rw_data);
+
+               /* schedule task for interrupt status */
+               /* bit7 -> Write General Communication B register */
+               /* read (General Communication B register) */
+               /* bit5 -> Write Status Idle */
+               /* bit2 -> Read Status Busy  */
+               if (status & INT_GCR_B
+                   || atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+                       retval =
+                           ks7010_sdio_read(priv, GCR_B, &rw_data,
+                                            sizeof(rw_data));
+                       if (retval) {
+                               DPRINTK(1, " error : GCR_B=%02X\n", rw_data);
+                               goto intr_out;
+                       }
+                       /* DPRINTK(1, "GCR_B=%02X\n", rw_data); */
+                       if (rw_data == GCR_B_ACTIVE) {
+                               if (atomic_read(&priv->psstatus.status) ==
+                                   PS_SNOOZE) {
+                                       atomic_set(&priv->psstatus.status,
+                                                  PS_WAKEUP);
+                                       priv->wakeup_count = 0;
+                               }
+                               complete(&priv->psstatus.wakeup_wait);
+                       }
+
+               }
+
+               do {
+                       /* read (WriteStatus/ReadDataSize FN1:00_0014) */
+                       retval =
+                           ks7010_sdio_read(priv, WSTATUS_RSIZE, &rw_data,
+                                            sizeof(rw_data));
+                       if (retval) {
+                               DPRINTK(1, " error : WSTATUS_RSIZE=%02X\n",
+                                       rw_data);
+                               goto intr_out;
+                       }
+                       DPRINTK(4, "WSTATUS_RSIZE=%02X\n", rw_data);
+                       rsize = rw_data & RSIZE_MASK;
+                       if (rsize) {    /* Read schedule */
+                               ks_wlan_hw_rx((void *)priv,
+                                             (uint16_t) (((rsize) << 4)));
+                       }
+                       if (rw_data & WSTATUS_MASK) {
+#if 0
+                               if (status & INT_WRITE_STATUS
+                                   && !cnt_txqbody(priv)) {
+                                       /* dummy write for interrupt clear */
+                                       rw_data = 0;
+                                       retval =
+                                           ks7010_sdio_write(priv, DATA_WINDOW,
+                                                             &rw_data,
+                                                             sizeof(rw_data));
+                                       if (retval) {
+                                               DPRINTK(1,
+                                                       "write DATA_WINDOW Failed!!(%d)\n",
+                                                       retval);
+                                       }
+                                       status &= ~INT_WRITE_STATUS;
+                               } else {
+#endif
+                                       if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) {
+                                               if (cnt_txqbody(priv)) {
+                                                       ks_wlan_hw_wakeup_request(priv);
+                                                       queue_delayed_work
+                                                           (priv->ks_wlan_hw.
+                                                            ks7010sdio_wq,
+                                                            &priv->ks_wlan_hw.
+                                                            rw_wq, 1);
+                                                       return;
+                                               }
+                                       } else {
+                                               tx_device_task((void *)priv);
+                                       }
+#if 0
+                               }
+#endif
+                       }
+               } while (rsize);
+       }
+
+ intr_out:
+       queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+                          &priv->ks_wlan_hw.rw_wq, 0);
+       return;
+}
+
+/* Initialize the tx/rx queue state: reset head/tail indices, set up the
+ * per-queue spinlocks, and bind the rx tasklet to rx_event_task.
+ * Always returns 0.
+ */
+static int trx_device_init(struct ks_wlan_private *priv)
+{
+       /* initialize values (tx) */
+       priv->tx_dev.qtail = priv->tx_dev.qhead = 0;
+
+       /* initialize values (rx) */
+       priv->rx_dev.qtail = priv->rx_dev.qhead = 0;
+
+       /* initialize spinLock (tx,rx) */
+       spin_lock_init(&priv->tx_dev.tx_dev_lock);
+       spin_lock_init(&priv->rx_dev.rx_dev_lock);
+
+       tasklet_init(&priv->ks_wlan_hw.rx_bh_task, rx_event_task,
+                    (unsigned long)priv);
+
+       return 0;
+}
+
+/* Tear down the tx/rx queues: free every queued tx buffer (invoking its
+ * completion handler so waiters are not left hanging) and kill the rx
+ * tasklet.
+ */
+static void trx_device_exit(struct ks_wlan_private *priv)
+{
+       struct tx_device_buffer *sp;
+
+       /* tx buffer clear */
+       while (cnt_txqbody(priv) > 0) {
+               sp = &priv->tx_dev.tx_dev_buff[priv->tx_dev.qhead];
+               kfree(sp->sendp);       /* allocated memory free */
+               if (sp->complete_handler != NULL)       /* TX Complete */
+                       (*sp->complete_handler) (sp->arg1, sp->arg2);
+               inc_txqhead(priv);
+       }
+
+       tasklet_kill(&priv->ks_wlan_hw.rx_bh_task);
+
+       return;
+}
+
+/* Program the device's firmware-load window by writing @index to both
+ * the WRITE_INDEX and READ_INDEX registers.  Returns 0 on success or a
+ * positive step code on failure (1 = alloc, 2 = write index, 3 = read
+ * index).  A bounce buffer is used because the SDIO layer needs a
+ * DMA-able buffer rather than a stack variable.
+ */
+static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
+{
+       int rc = 0;
+       int retval;
+       unsigned char *data_buf;
+
+       data_buf = kmalloc(sizeof(u32), GFP_KERNEL);
+       if (!data_buf)
+               return 1;
+
+       memcpy(data_buf, &index, sizeof(index));
+       retval = ks7010_sdio_write(priv, WRITE_INDEX, data_buf, sizeof(index));
+       if (retval) {
+               rc = 2;
+               goto error_out;
+       }
+
+       retval = ks7010_sdio_write(priv, READ_INDEX, data_buf, sizeof(index));
+       if (retval) {
+               rc = 3;
+               goto error_out;
+       }
+ error_out:
+       kfree(data_buf);        /* kfree(NULL) is a no-op; no guard needed */
+       return rc;
+}
+
+#define ROM_BUFF_SIZE (64*1024)
+static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
+                                   unsigned char *data, unsigned int size)
+{
+       int rc = 0;
+       int retval;
+       unsigned char *read_buf;
+       read_buf = NULL;
+       read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
+       if (!read_buf) {
+               rc = 1;
+               goto error_out;
+       }
+       retval = ks7010_sdio_read(priv, address, read_buf, size);
+       if (retval) {
+               rc = 2;
+               goto error_out;
+       }
+       retval = memcmp(data, read_buf, size);
+
+       if (retval) {
+               DPRINTK(0, "data compare error (%d) \n", retval);
+               rc = 3;
+               goto error_out;
+       }
+ error_out:
+       if (read_buf)
+               kfree(read_buf);
+       return rc;
+}
+
+/* Upload the MAC firmware to the device: request ROM_FILE, stream it
+ * into IRAM in ROM_BUFF_SIZE chunks (verifying each write-back), issue
+ * a remap, and poll GCR_A until the firmware reports running.  Returns
+ * 0 on success (or when firmware is already running) or a positive step
+ * code on failure.  The SDIO host is claimed for the whole sequence.
+ */
+static int ks7010_upload_firmware(struct ks_wlan_private *priv,
+                                 struct ks_sdio_card *card)
+{
+       unsigned int size, offset, n = 0;
+       unsigned char *rom_buf;
+       unsigned char rw_data = 0;
+       int retval, rc = 0;
+       int length;
+       const struct firmware *fw_entry = NULL;
+
+       rom_buf = NULL;
+
+       /* buffer allocate */
+       rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
+       if (!rom_buf) {
+               rc = 3;
+               goto error_out0;
+       }
+
+       sdio_claim_host(card->func);
+
+       /* Firmware running ? */
+       /* NOTE(review): retval is not checked; rw_data stays 0 on a failed
+        * read, so the upload proceeds — confirm this is intended. */
+       retval = ks7010_sdio_read(priv, GCR_A, &rw_data, sizeof(rw_data));
+       if (rw_data == GCR_A_RUN) {
+               DPRINTK(0, "MAC firmware running ...\n");
+               rc = 0;
+               goto error_out0;
+       }
+
+       retval = request_firmware(&fw_entry, ROM_FILE, &priv->ks_wlan_hw.sdio_card->func->dev);
+       if (retval) {
+               /* was "return retval", which leaked rom_buf and left the
+                * SDIO host claimed; go through the common cleanup path
+                * (fw_entry is NULL here, so no release needed) */
+               rc = retval;
+               goto error_out0;
+       }
+
+       length = fw_entry->size;
+
+       /* Load Program */
+       n = 0;
+       do {
+               if (length >= ROM_BUFF_SIZE) {
+                       size = ROM_BUFF_SIZE;
+                       length = length - ROM_BUFF_SIZE;
+               } else {
+                       size = length;
+                       length = 0;
+               }
+               DPRINTK(4, "size = %d\n", size);
+               if (size == 0)
+                       break;
+               memcpy(rom_buf, fw_entry->data + n, size);
+               /* Update write index */
+               offset = n;
+               retval =
+                   ks7010_sdio_update_index(priv,
+                                            KS7010_IRAM_ADDRESS + offset);
+               if (retval) {
+                       rc = 6;
+                       goto error_out1;
+               }
+
+               /* Write data */
+               retval = ks7010_sdio_write(priv, DATA_WINDOW, rom_buf, size);
+               if (retval) {
+                       rc = 8;
+                       goto error_out1;
+               }
+
+               /* compare */
+               retval =
+                   ks7010_sdio_data_compare(priv, DATA_WINDOW, rom_buf, size);
+               if (retval) {
+                       rc = 9;
+                       goto error_out1;
+               }
+               n += size;
+
+       } while (size);
+
+       /* Remap request */
+       rw_data = GCR_A_REMAP;
+       retval = ks7010_sdio_write(priv, GCR_A, &rw_data, sizeof(rw_data));
+       if (retval) {
+               rc = 11;
+               goto error_out1;
+       }
+       DPRINTK(4, " REMAP Request : GCR_A=%02X\n", rw_data);
+
+       /* Firmware running check */
+       for (n = 0; n < 50; ++n) {
+               mdelay(10);     /* wait_ms(10); */
+               retval =
+                   ks7010_sdio_read(priv, GCR_A, &rw_data, sizeof(rw_data));
+               if (retval) {
+                       rc = 11;
+                       goto error_out1;
+               }
+               if (rw_data == GCR_A_RUN)
+                       break;
+       }
+       DPRINTK(4, "firmware wakeup (%d)!!!!\n", n);
+       if ((50) <= n) {
+               DPRINTK(1, "firmware can't start\n");
+               rc = 12;
+               goto error_out1;
+       }
+
+       rc = 0;
+
+ error_out1:
+       release_firmware(fw_entry);
+ error_out0:
+       sdio_release_host(card->func);
+       kfree(rom_buf);         /* kfree(NULL) is a no-op; no guard needed */
+       return rc;
+}
+
+/*
+ * Bring the card's MAC firmware to an operational state after firmware
+ * upload: issue SME_START to fetch the MAC address and firmware version,
+ * then push the stored wireless parameters down to the device.  Each of
+ * the two handshakes waits up to 5 seconds on priv->confirm_wait.
+ */
+static void ks7010_card_init(struct ks_wlan_private *priv)
+{
+       DPRINTK(5, "\ncard_init_task()\n");
+
+       /* init_waitqueue_head(&priv->confirm_wait); */
+       init_completion(&priv->confirm_wait);
+
+       DPRINTK(5, "init_completion()\n");
+
+       /* get mac address & firmware version */
+       hostif_sme_enqueue(priv, SME_START);
+
+       DPRINTK(5, "hostif_sme_enqueu()\n");
+
+       /* Wait for the SME_START confirm; a timeout is only logged and the
+        * init sequence continues regardless. */
+       if (!wait_for_completion_interruptible_timeout
+           (&priv->confirm_wait, 5 * HZ)) {
+               DPRINTK(1, "wait time out!! SME_START\n");
+       }
+
+       /* PREINIT only once the MAC address and version string came back. */
+       if (priv->mac_address_valid && priv->version_size) {
+               priv->dev_state = DEVICE_STATE_PREINIT;
+       }
+
+       hostif_sme_enqueue(priv, SME_GET_EEPROM_CKSUM);
+
+       /* load initial wireless parameter */
+       hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+
+       hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST);
+       hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST);
+
+       hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST);
+       hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST);
+       hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST);
+       hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST);
+       hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST);
+
+       hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
+       hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST);
+       hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST);
+       /* SME_START_REQUEST must be last: its confirm completes confirm_wait. */
+       hostif_sme_enqueue(priv, SME_START_REQUEST);
+
+       if (!wait_for_completion_interruptible_timeout
+           (&priv->confirm_wait, 5 * HZ)) {
+               DPRINTK(1, "wait time out!! wireless parameter set\n");
+       }
+
+       /* READY only if the first handshake reached at least PREINIT. */
+       if (priv->dev_state >= DEVICE_STATE_PREINIT) {
+               DPRINTK(1, "DEVICE READY!!\n");
+               priv->dev_state = DEVICE_STATE_READY;
+       } else {
+               DPRINTK(1, "dev_state=%d\n", priv->dev_state);
+       }
+}
+
+/*
+ * Install the power-on defaults for the wireless configuration registers.
+ * Called once from probe, before the first SME request is queued.
+ *
+ * Fix: the original assigned priv->reg.tx_rate twice (TX_RATE_AUTO first,
+ * then TX_RATE_FULL_AUTO at the end); the first store was dead and has
+ * been removed.
+ */
+static void ks7010_init_defaults(struct ks_wlan_private *priv)
+{
+       priv->reg.tx_rate = TX_RATE_FULL_AUTO;
+       priv->reg.preamble = LONG_PREAMBLE;
+       priv->reg.powermgt = POWMGT_ACTIVE_MODE;
+       priv->reg.scan_type = ACTIVE_SCAN;
+       priv->reg.beacon_lost_count = 20;
+       priv->reg.rts = 2347UL;
+       priv->reg.fragment = 2346UL;
+       priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
+       priv->reg.cts_mode = CTS_MODE_FALSE;
+       /* Supported rate set: 802.11b rates are mandatory (BASIC_RATE),
+        * the 802.11g OFDM basic rates 6/12/24 likewise. */
+       priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
+       priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
+       priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
+       priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
+       priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
+       priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
+       priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
+       priv->reg.rate_set.body[7] = TX_RATE_9M;
+       priv->reg.rate_set.body[8] = TX_RATE_18M;
+       priv->reg.rate_set.body[9] = TX_RATE_36M;
+       priv->reg.rate_set.body[10] = TX_RATE_48M;
+       priv->reg.rate_set.body[11] = TX_RATE_54M;
+       priv->reg.rate_set.size = 12;
+}
+
+/*
+ * Probe a newly inserted KS7010 SDIO function.
+ *
+ * Sets the SDIO block size, enables the function, installs the SDIO irq
+ * handler, allocates the net_device plus private data, uploads the
+ * firmware, starts the rw workqueue and finally registers the netdev.
+ *
+ * Fixes over the original:
+ *  - sdio_writeb(INT_ENABLE) failure jumped to error_free_card, leaving
+ *    the function enabled; it now unwinds through error_disable_func.
+ *  - error_free_netdev called free_netdev(priv->net_dev) although priv
+ *    is still NULL when dev_alloc_name() fails; the local netdev is
+ *    freed instead.
+ *  - register_netdev() failure leaked the workqueue; a dedicated
+ *    error_destroy_wq label now tears it down.
+ *  - duplicated atomic_set(&priv->sleepstatus.wakeup_request, 0) removed.
+ */
+static int ks7010_sdio_probe(struct sdio_func *func,
+                            const struct sdio_device_id *device)
+{
+       struct ks_wlan_private *priv = NULL;
+       struct ks_sdio_card *card;
+       struct net_device *netdev = NULL;
+       unsigned char rw_data;
+       int ret;
+
+       DPRINTK(5, "ks7010_sdio_probe()\n");
+
+       /* initialize ks_sdio_card */
+       card = kzalloc(sizeof(struct ks_sdio_card), GFP_KERNEL);
+       if (!card)
+               return -ENOMEM;
+
+       card->func = func;
+       spin_lock_init(&card->lock);
+
+       /*** Initialize SDIO ***/
+       sdio_claim_host(func);
+
+       /* function blocksize set */
+       ret = sdio_set_block_size(func, KS7010_IO_BLOCK_SIZE);
+       DPRINTK(5, "multi_block=%d sdio_set_block_size()=%d %d\n",
+               func->card->cccr.multi_block, func->cur_blksize, ret);
+
+       /* function enable */
+       ret = sdio_enable_func(func);
+       DPRINTK(5, "sdio_enable_func() %d\n", ret);
+       if (ret)
+               goto error_free_card;
+
+       /* interrupt disable */
+       sdio_writeb(func, 0, INT_ENABLE, &ret);
+       if (ret)
+               goto error_disable_func;
+       sdio_writeb(func, 0xff, INT_PENDING, &ret);
+       if (ret)
+               goto error_disable_func;
+
+       /* setup interrupt handler */
+       ret = sdio_claim_irq(func, ks_sdio_interrupt);
+       if (ret)
+               goto error_disable_func;
+
+       sdio_release_host(func);
+
+       sdio_set_drvdata(func, card);
+
+       DPRINTK(5, "class = 0x%X, vendor = 0x%X, "
+               "device = 0x%X\n", func->class, func->vendor, func->device);
+
+       /* private memory allocate */
+       netdev = alloc_etherdev(sizeof(*priv));
+       if (netdev == NULL) {
+               printk(KERN_ERR "ks7010 : Unable to alloc new net device\n");
+               goto error_release_irq;
+       }
+       if (dev_alloc_name(netdev, "wlan%d") < 0) {
+               printk(KERN_ERR "ks7010 :  Couldn't get name!\n");
+               goto error_free_netdev;
+       }
+
+       priv = netdev_priv(netdev);
+
+       card->priv = priv;
+       SET_NETDEV_DEV(netdev, &card->func->dev);       /* for create sysfs symlinks */
+
+       /* private memory initialize */
+       priv->ks_wlan_hw.sdio_card = card;
+       init_completion(&priv->ks_wlan_hw.ks7010_sdio_wait);
+       priv->ks_wlan_hw.read_buf = kmalloc(RX_DATA_SIZE, GFP_KERNEL);
+       if (!priv->ks_wlan_hw.read_buf)
+               goto error_free_netdev;
+
+       priv->dev_state = DEVICE_STATE_PREBOOT;
+       priv->net_dev = netdev;
+       priv->firmware_version[0] = '\0';
+       priv->version_size = 0;
+       priv->last_doze = jiffies;      /* set current jiffies */
+       priv->last_wakeup = jiffies;
+       memset(&priv->nstats, 0, sizeof(priv->nstats));
+       memset(&priv->wstats, 0, sizeof(priv->wstats));
+
+       /* sleep mode */
+       atomic_set(&priv->sleepstatus.doze_request, 0);
+       atomic_set(&priv->sleepstatus.wakeup_request, 0);
+
+       trx_device_init(priv);
+       hostif_init(priv);
+       ks_wlan_net_start(netdev);
+
+       ks7010_init_defaults(priv);
+
+       /* Upload firmware */
+       ret = ks7010_upload_firmware(priv, card);       /* firmware load */
+       if (ret) {
+               printk(KERN_ERR
+                      "ks7010: firmware load failed !! return code = %d\n",
+                      ret);
+               goto error_free_read_buf;
+       }
+
+       /* interrupt setting */
+       /* clear Interrupt status write (ARMtoSD_InterruptPending FN1:00_0024) */
+       rw_data = 0xff;
+       sdio_claim_host(func);
+       ret = ks7010_sdio_write(priv, INT_PENDING, &rw_data, sizeof(rw_data));
+       sdio_release_host(func);
+       if (ret) {
+               DPRINTK(1, " error : INT_PENDING=%02X\n", rw_data);
+       }
+       DPRINTK(4, " clear Interrupt : INT_PENDING=%02X\n", rw_data);
+
+       /* enable ks7010sdio interrupt (INT_GCR_B|INT_READ_STATUS|INT_WRITE_STATUS) */
+       rw_data = (INT_GCR_B | INT_READ_STATUS | INT_WRITE_STATUS);
+       sdio_claim_host(func);
+       ret = ks7010_sdio_write(priv, INT_ENABLE, &rw_data, sizeof(rw_data));
+       sdio_release_host(func);
+       if (ret) {
+               DPRINTK(1, " error : INT_ENABLE=%02X\n", rw_data);
+       }
+       DPRINTK(4, " enable Interrupt : INT_ENABLE=%02X\n", rw_data);
+       priv->dev_state = DEVICE_STATE_BOOT;
+
+       priv->ks_wlan_hw.ks7010sdio_wq = create_workqueue("ks7010sdio_wq");
+       if (!priv->ks_wlan_hw.ks7010sdio_wq) {
+               DPRINTK(1, "create_workqueue failed !!\n");
+               goto error_free_read_buf;
+       }
+
+       INIT_DELAYED_WORK(&priv->ks_wlan_hw.rw_wq, ks7010_rw_function);
+       ks7010_card_init(priv);
+
+       ret = register_netdev(priv->net_dev);
+       if (ret)
+               goto error_destroy_wq;
+
+       return 0;
+
+ error_destroy_wq:
+       destroy_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);
+ error_free_read_buf:
+       kfree(priv->ks_wlan_hw.read_buf);
+       priv->ks_wlan_hw.read_buf = NULL;
+ error_free_netdev:
+       free_netdev(netdev);
+       card->priv = NULL;
+ error_release_irq:
+       sdio_claim_host(func);
+       sdio_release_irq(func);
+ error_disable_func:
+       sdio_disable_func(func);
+ error_free_card:
+       sdio_release_host(func);
+       sdio_set_drvdata(func, NULL);
+       kfree(card);
+
+       return -ENODEV;
+}
+
+/*
+ * Tear down a KS7010 SDIO function on card removal or driver unload:
+ * stop the netdev, quiesce the card (irq mask + HIF_STOP_REQ), destroy
+ * the workqueue, unregister/free the netdev and release the SDIO
+ * function.
+ *
+ * Fixes over the original:
+ *  - priv->net_dev was dereferenced before the NULL check on priv; the
+ *    netdev pointer is now read only inside the if (priv) branch.
+ *  - a kzalloc() failure for the stop request returned early and leaked
+ *    the workqueue, netdev and SDIO irq; the stop request is now simply
+ *    skipped and the teardown continues.
+ */
+static void ks7010_sdio_remove(struct sdio_func *func)
+{
+       int ret;
+       struct ks_sdio_card *card;
+       struct ks_wlan_private *priv;
+
+       DPRINTK(1, "ks7010_sdio_remove()\n");
+
+       card = sdio_get_drvdata(func);
+       if (card == NULL)
+               return;
+
+       priv = card->priv;
+       if (priv) {
+               struct net_device *netdev = priv->net_dev;
+
+               ks_wlan_net_stop(netdev);
+               DPRINTK(1, "ks_wlan_net_stop\n");
+
+               /* interrupt disable */
+               sdio_claim_host(func);
+               sdio_writeb(func, 0, INT_ENABLE, &ret);
+               sdio_writeb(func, 0xff, INT_PENDING, &ret);
+               sdio_release_host(func);
+               DPRINTK(1, "interrupt disable\n");
+
+               /* send stop request to MAC; best-effort, teardown continues
+                * even if the request buffer cannot be allocated. */
+               {
+                       struct hostif_stop_request_t *pp;
+
+                       pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL);
+                       if (pp) {
+                               pp->header.size =
+                                   cpu_to_le16((uint16_t)
+                                               (sizeof(*pp) -
+                                                sizeof(pp->header.size)));
+                               pp->header.event =
+                                   cpu_to_le16((uint16_t) HIF_STOP_REQ);
+
+                               sdio_claim_host(func);
+                               write_to_device(priv, (unsigned char *)pp,
+                                               hif_align_size(sizeof(*pp)));
+                               sdio_release_host(func);
+                               kfree(pp);
+                       } else {
+                               DPRINTK(3, "allocate memory failed..\n");
+                       }
+               }
+               DPRINTK(1, "STOP Req\n");
+
+               if (priv->ks_wlan_hw.ks7010sdio_wq) {
+                       flush_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);
+                       destroy_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);
+               }
+               DPRINTK(1,
+                       "destroy_workqueue(priv->ks_wlan_hw.ks7010sdio_wq);\n");
+
+               hostif_exit(priv);
+               DPRINTK(1, "hostif_exit\n");
+
+               unregister_netdev(netdev);
+
+               trx_device_exit(priv);
+               kfree(priv->ks_wlan_hw.read_buf);       /* kfree(NULL) is a no-op */
+               free_netdev(netdev);
+               card->priv = NULL;
+       }
+
+       sdio_claim_host(func);
+       sdio_release_irq(func);
+       DPRINTK(1, "sdio_release_irq()\n");
+       sdio_disable_func(func);
+       DPRINTK(1, "sdio_disable_func()\n");
+       sdio_release_host(func);
+
+       sdio_set_drvdata(func, NULL);
+
+       kfree(card);
+       DPRINTK(1, "kfree()\n");
+
+       DPRINTK(5, " Bye !!\n");
+}
+
+/* SDIO driver glue: ID table plus the probe/remove callbacks above. */
+static struct sdio_driver ks7010_sdio_driver = {
+       .name = "ks7010_sdio",
+       .id_table = ks7010_sdio_ids,
+       .probe = ks7010_sdio_probe,
+       .remove = ks7010_sdio_remove,
+};
+
+/* Expands to module init/exit that (un)register the SDIO driver. */
+module_driver(ks7010_sdio_driver, sdio_register_driver, sdio_unregister_driver);
+MODULE_AUTHOR("Sang Engineering, Qi-Hardware, KeyStream");
+MODULE_DESCRIPTION("Driver for KeyStream KS7010 based SDIO cards");
+MODULE_LICENSE("GPL v2");
+MODULE_FIRMWARE(ROM_FILE);
diff --git a/drivers/staging/ks7010/ks7010_sdio.h b/drivers/staging/ks7010/ks7010_sdio.h
new file mode 100644 (file)
index 0000000..c72064b
--- /dev/null
@@ -0,0 +1,147 @@
+/*
+ *   Driver for KeyStream, KS7010 based SDIO cards.
+ *
+ *   Copyright (C) 2006-2008 KeyStream Corp.
+ *   Copyright (C) 2009 Renesas Technology Corp.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License version 2 as
+ *   published by the Free Software Foundation.
+ */
+#ifndef _KS7010_SDIO_H
+#define _KS7010_SDIO_H
+
+#ifdef DEVICE_ALIGNMENT
+#undef DEVICE_ALIGNMENT
+#endif
+#define DEVICE_ALIGNMENT 32
+
+/*  SDIO KeyStream vendor and device */
+#define SDIO_VENDOR_ID_KS_CODE_A       0x005b
+#define SDIO_VENDOR_ID_KS_CODE_B       0x0023
+/* Older sources suggest earlier versions were named 7910 or 79xx */
+#define SDIO_DEVICE_ID_KS_7010         0x7910
+
+/* Read Status Register */
+#define READ_STATUS            0x000000
+#define READ_STATUS_BUSY       0
+#define READ_STATUS_IDLE       1
+
+/* Read Index Register */
+#define READ_INDEX             0x000004
+
+/* Read Data Size Register */
+#define READ_DATA_SIZE         0x000008
+
+/* Write Status Register */
+#define WRITE_STATUS           0x00000C
+#define WRITE_STATUS_BUSY      0
+#define WRITE_STATUS_IDLE      1
+
+/* Write Index Register */
+#define WRITE_INDEX            0x000010
+
+/* Write Status/Read Data Size Register
+ * for network packet (less than 2048 bytes data)
+ */
+#define WSTATUS_RSIZE          0x000014
+#define WSTATUS_MASK           0x80    /* Write Status Register value */
+#define RSIZE_MASK             0x7F    /* Read Data Size Register value [10:4] */
+
+/* ARM to SD interrupt Enable */
+#define INT_ENABLE             0x000020
+/* ARM to SD interrupt Pending */
+#define INT_PENDING            0x000024
+
+#define INT_GCR_B              (1<<7)
+#define INT_GCR_A              (1<<6)
+#define INT_WRITE_STATUS       (1<<5)
+#define INT_WRITE_INDEX                (1<<4)
+#define INT_WRITE_SIZE         (1<<3)
+#define INT_READ_STATUS                (1<<2)
+#define INT_READ_INDEX         (1<<1)
+#define INT_READ_SIZE          (1<<0)
+
+/* General Communication Register A */
+#define GCR_A                  0x000028
+#define GCR_A_INIT             0
+#define GCR_A_REMAP            1
+#define GCR_A_RUN              2
+
+/* General Communication Register B */
+#define GCR_B                  0x00002C
+#define GCR_B_ACTIVE           0
+#define GCR_B_DOZE             1
+
+/* Wakeup Register */
+/* #define WAKEUP                      0x008104 */
+/* #define WAKEUP_REQ          0x00 */
+#define WAKEUP                 0x008018
+#define WAKEUP_REQ             0x5a
+
+/* AHB Data Window  0x010000-0x01FFFF */
+#define DATA_WINDOW            0x010000
+/* parenthesized so WINDOW_SIZE is safe inside larger expressions */
+#define WINDOW_SIZE            (64 * 1024)
+
+#define KS7010_IRAM_ADDRESS    0x06000000
+
+/*
+ * struct define
+ */
+/* Per-card hardware state embedded in struct ks_wlan_private. */
+struct hw_info_t {
+       struct ks_sdio_card *sdio_card;
+       struct completion ks7010_sdio_wait;
+       struct workqueue_struct *ks7010sdio_wq;
+       struct delayed_work rw_wq;
+       unsigned char *read_buf;
+       struct tasklet_struct rx_bh_task;
+};
+
+struct ks_sdio_packet {
+       struct ks_sdio_packet *next;
+       u16 nb;
+       /* C99 flexible array member (was u8 buffer[0]) */
+       u8 buffer[] __attribute__ ((aligned(4)));
+};
+
+struct ks_sdio_card {
+       struct sdio_func *func;
+       struct ks_wlan_private *priv;
+       spinlock_t lock;
+};
+
+/* Tx Device struct */
+#define        TX_DEVICE_BUFF_SIZE     1024
+
+struct tx_device_buffer {
+       unsigned char *sendp;   /* pointer of send req data */
+       unsigned int size;
+       void (*complete_handler) (void *arg1, void *arg2);
+       void *arg1;
+       void *arg2;
+};
+
+struct tx_device {
+       struct tx_device_buffer tx_dev_buff[TX_DEVICE_BUFF_SIZE];
+       unsigned int qhead;     /* tx buffer queue first pointer */
+       unsigned int qtail;     /* tx buffer queue last pointer */
+       spinlock_t tx_dev_lock;
+};
+
+/* Rx Device struct */
+#define        RX_DATA_SIZE    (2 + 2 + 2347 + 1)
+#define        RX_DEVICE_BUFF_SIZE     32
+
+struct rx_device_buffer {
+       unsigned char data[RX_DATA_SIZE];
+       unsigned int size;
+};
+
+struct rx_device {
+       struct rx_device_buffer rx_dev_buff[RX_DEVICE_BUFF_SIZE];
+       unsigned int qhead;     /* rx buffer queue first pointer */
+       unsigned int qtail;     /* rx buffer queue last pointer */
+       spinlock_t rx_dev_lock;
+};
+#define        ROM_FILE "ks7010sd.rom"
+
+#endif /* _KS7010_SDIO_H */
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
new file mode 100644 (file)
index 0000000..a8822fe
--- /dev/null
@@ -0,0 +1,2760 @@
+/*
+ *   Driver for KeyStream wireless LAN cards.
+ *
+ *   Copyright (C) 2005-2008 KeyStream Corp.
+ *   Copyright (C) 2009 Renesas Technology Corp.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License version 2 as
+ *   published by the Free Software Foundation.
+ */
+
+#include "ks_wlan.h"
+#include "ks_hostif.h"
+#include "eap_packet.h"
+#include "michael_mic.h"
+
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+
+/* Include Wireless Extension definition and check version */
+#include <net/iw_handler.h>    /* New driver API */
+
+extern int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p,
+                        unsigned long size,
+                        void (*complete_handler) (void *arg1, void *arg2),
+                        void *arg1, void *arg2);
+extern void send_packet_complete(void *, void *);
+
+extern void ks_wlan_hw_wakeup_request(struct ks_wlan_private *priv);
+extern int ks_wlan_hw_power_save(struct ks_wlan_private *priv);
+
+/* macro */
+#define inc_smeqhead(priv) \
+        ( priv->sme_i.qhead = (priv->sme_i.qhead + 1) % SME_EVENT_BUFF_SIZE )
+#define inc_smeqtail(priv) \
+        ( priv->sme_i.qtail = (priv->sme_i.qtail + 1) % SME_EVENT_BUFF_SIZE )
+#define cnt_smeqbody(priv) \
+        (((priv->sme_i.qtail + SME_EVENT_BUFF_SIZE) - (priv->sme_i.qhead)) % SME_EVENT_BUFF_SIZE )
+
+#define KS_WLAN_MEM_FLAG (GFP_ATOMIC)
+
+/* Consume one octet from the receive buffer, advancing rxp and shrinking
+ * rx_size.  The caller is responsible for checking rx_size beforehand.
+ */
+static
+inline u8 get_BYTE(struct ks_wlan_private *priv)
+{
+       u8 octet = *priv->rxp;
+
+       priv->rxp++;
+       priv->rx_size--;
+       return octet;
+}
+
+/* Read a little-endian 16-bit value from the receive buffer.  The two
+ * byte fetches are kept as separate statements to force their order.
+ */
+static
+inline u16 get_WORD(struct ks_wlan_private *priv)
+{
+       u16 lo = get_BYTE(priv);
+       u16 hi = get_BYTE(priv);
+
+       return lo | (u16)(hi << 8);
+}
+
+/* Read a little-endian 32-bit value from the receive buffer.  The four
+ * byte fetches are kept as separate statements to force their order.
+ */
+static
+inline u32 get_DWORD(struct ks_wlan_private *priv)
+{
+       u32 b0 = get_BYTE(priv);
+       u32 b1 = get_BYTE(priv);
+       u32 b2 = get_BYTE(priv);
+       u32 b3 = get_BYTE(priv);
+
+       return b0 | (b1 << 8) | (b2 << 16) | (b3 << 24);
+}
+
+/*
+ * Workqueue task that wakes the card out of SNOOZE power save.  If the
+ * wakeup confirm does not arrive within ~20ms the work re-queues itself
+ * and retries.  Once awake (or if not snoozing), the SME tasklet is
+ * re-enabled if it had been disabled (count > 0).
+ */
+void ks_wlan_hw_wakeup_task(struct work_struct *work)
+{
+       struct ks_wlan_private *priv =
+           container_of(work, struct ks_wlan_private, ks_wlan_wakeup_task);
+       int ps_status = atomic_read(&priv->psstatus.status);
+
+       if (ps_status == PS_SNOOZE) {
+               ks_wlan_hw_wakeup_request(priv);
+               if (!wait_for_completion_interruptible_timeout(&priv->psstatus.wakeup_wait, HZ / 50)) { /* 20ms timeout */
+                       DPRINTK(1, "wake up timeout !!!\n");
+                       /* retry: reschedule this same work item */
+                       schedule_work(&priv->ks_wlan_wakeup_task);
+                       return;
+               }
+       } else {
+               DPRINTK(1, "ps_status=%d\n", ps_status);
+       }
+
+       /* power save */
+       /* tasklet count > 0 means it was disabled pending wakeup */
+       if (atomic_read(&priv->sme_task.count) > 0) {
+               DPRINTK(4, "sme task enable.\n");
+               tasklet_enable(&priv->sme_task);
+       }
+}
+
+/* Request power management from the SME while associated; when not
+ * associated the device is simply marked READY again.  Always returns 0.
+ */
+static
+int ks_wlan_do_power_save(struct ks_wlan_private *priv)
+{
+       DPRINTK(4, "psstatus.status=%d\n", atomic_read(&priv->psstatus.status));
+
+       if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS)
+               hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
+       else
+               priv->dev_state = DEVICE_STATE_READY;
+
+       return 0;
+}
+
+/*
+ * Populate priv->current_ap from a connect-result link_ap_info_t and,
+ * when associated, emit an SIOCGIWAP wireless event carrying the BSSID.
+ * Returns 0 on success, 1 when the device reports disconnected (the
+ * cached AP record is then cleared).
+ */
+static
+int get_current_ap(struct ks_wlan_private *priv, struct link_ap_info_t *ap_info)
+{
+       struct local_ap_t *ap;
+       union iwreq_data wrqu;
+       struct net_device *netdev = priv->net_dev;
+       int rc = 0;
+
+       DPRINTK(3, "\n");
+       ap = &(priv->current_ap);
+
+       /* Not associated: wipe the cached AP info and bail out. */
+       if ((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS) {
+               memset(ap, 0, sizeof(struct local_ap_t));
+               return 1;
+       }
+
+       /* bssid */
+       memcpy(&(ap->bssid[0]), &(ap_info->bssid[0]), ETH_ALEN);
+       /* essid: taken from the configured SSID, not from ap_info */
+       memcpy(&(ap->ssid.body[0]), &(priv->reg.ssid.body[0]),
+              priv->reg.ssid.size);
+       ap->ssid.size = priv->reg.ssid.size;
+       /* rate_set: basic rates first, extended rates appended below */
+       memcpy(&(ap->rate_set.body[0]), &(ap_info->rate_set.body[0]),
+              ap_info->rate_set.size);
+       ap->rate_set.size = ap_info->rate_set.size;
+       if (ap_info->ext_rate_set.size) {
+               /* rate_set */
+               /* NOTE(review): no check that rate_set.size + ext_rate_set.size
+                * fits the body array — assumed bounded by firmware; confirm */
+               memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+                      &(ap_info->ext_rate_set.body[0]),
+                      ap_info->ext_rate_set.size);
+               ap->rate_set.size += ap_info->ext_rate_set.size;
+       }
+       /* channel */
+       ap->channel = ap_info->ds_parameter.channel;
+       /* rssi */
+       ap->rssi = ap_info->rssi;
+       /* sq */
+       ap->sq = ap_info->sq;
+       /* noise */
+       ap->noise = ap_info->noise;
+       /* capability */
+       ap->capability = ap_info->capability;
+       /* rsn: copy the (single) RSN blob into the IE slot matching the
+        * negotiated WPA version; sizes are clamped to RSN_IE_BODY_MAX */
+       if ((ap_info->rsn_mode & RSN_MODE_WPA2)
+           && (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)) {
+               ap->rsn_ie.id = 0x30;
+               if (ap_info->rsn.size <= RSN_IE_BODY_MAX) {
+                       ap->rsn_ie.size = ap_info->rsn.size;
+                       memcpy(&(ap->rsn_ie.body[0]), &(ap_info->rsn.body[0]),
+                              ap_info->rsn.size);
+               } else {
+                       ap->rsn_ie.size = RSN_IE_BODY_MAX;
+                       memcpy(&(ap->rsn_ie.body[0]), &(ap_info->rsn.body[0]),
+                              RSN_IE_BODY_MAX);
+               }
+       } else if ((ap_info->rsn_mode & RSN_MODE_WPA)
+                  && (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA)) {
+               ap->wpa_ie.id = 0xdd;
+               if (ap_info->rsn.size <= RSN_IE_BODY_MAX) {
+                       ap->wpa_ie.size = ap_info->rsn.size;
+                       memcpy(&(ap->wpa_ie.body[0]), &(ap_info->rsn.body[0]),
+                              ap_info->rsn.size);
+               } else {
+                       ap->wpa_ie.size = RSN_IE_BODY_MAX;
+                       memcpy(&(ap->wpa_ie.body[0]), &(ap_info->rsn.body[0]),
+                              RSN_IE_BODY_MAX);
+               }
+       } else {
+               ap->rsn_ie.id = 0;
+               ap->rsn_ie.size = 0;
+               ap->wpa_ie.id = 0;
+               ap->wpa_ie.size = 0;
+       }
+
+       /* Notify wireless extensions listeners of the new association. */
+       wrqu.data.length = 0;
+       wrqu.data.flags = 0;
+       wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+       if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+               memcpy(wrqu.ap_addr.sa_data,
+                      &(priv->current_ap.bssid[0]), ETH_ALEN);
+               DPRINTK(3,
+                       "IWEVENT: connect bssid=%02x:%02x:%02x:%02x:%02x:%02x\n",
+                       (unsigned char)wrqu.ap_addr.sa_data[0],
+                       (unsigned char)wrqu.ap_addr.sa_data[1],
+                       (unsigned char)wrqu.ap_addr.sa_data[2],
+                       (unsigned char)wrqu.ap_addr.sa_data[3],
+                       (unsigned char)wrqu.ap_addr.sa_data[4],
+                       (unsigned char)wrqu.ap_addr.sa_data[5]);
+               wireless_send_event(netdev, SIOCGIWAP, &wrqu, NULL);
+       }
+       DPRINTK(4, "\n    Link AP\n");
+       DPRINTK(4, "    bssid=%02X:%02X:%02X:%02X:%02X:%02X\n \
+   essid=%s\n    rate_set=%02X,%02X,%02X,%02X,%02X,%02X,%02X,%02X\n    channel=%d\n \
+   rssi=%d\n    sq=%d\n    capability=%04X\n", ap->bssid[0], ap->bssid[1], ap->bssid[2], ap->bssid[3], ap->bssid[4], ap->bssid[5], &(ap->ssid.body[0]), ap->rate_set.body[0], ap->rate_set.body[1], ap->rate_set.body[2], ap->rate_set.body[3], ap->rate_set.body[4], ap->rate_set.body[5], ap->rate_set.body[6], ap->rate_set.body[7], ap->channel, ap->rssi, ap->sq, ap->capability);
+       DPRINTK(4, "\n    Link AP\n    rsn.mode=%d\n    rsn.size=%d\n",
+               ap_info->rsn_mode, ap_info->rsn.size);
+       DPRINTK(4, "\n    ext_rate_set_size=%d\n    rate_set_size=%d\n",
+               ap_info->ext_rate_set.size, ap_info->rate_set.size);
+
+       return rc;
+}
+
+/*
+ * Translate a scan-result ap_info_t from the firmware into a
+ * struct local_ap_t: fixed fields are copied directly, then the
+ * variable-length body is walked as 802.11 information elements
+ * (id, length, data).  Always returns 0.
+ *
+ * NOTE(review): the loop trusts each element's length byte; there is no
+ * check that (bp + 1) or the element data stays within body_size, so a
+ * malformed body could be over-read — assumed well-formed from firmware,
+ * TODO confirm.
+ */
+static
+int get_ap_information(struct ks_wlan_private *priv, struct ap_info_t *ap_info,
+                      struct local_ap_t *ap)
+{
+       unsigned char *bp;
+       int bsize, offset;
+       int rc = 0;
+
+       DPRINTK(3, "\n");
+       memset(ap, 0, sizeof(struct local_ap_t));
+
+       /* bssid */
+       memcpy(&(ap->bssid[0]), &(ap_info->bssid[0]), ETH_ALEN);
+       /* rssi */
+       ap->rssi = ap_info->rssi;
+       /* sq */
+       ap->sq = ap_info->sq;
+       /* noise */
+       ap->noise = ap_info->noise;
+       /* capability */
+       ap->capability = ap_info->capability;
+       /* channel */
+       ap->channel = ap_info->ch_info;
+
+       bp = &(ap_info->body[0]);
+       bsize = ap_info->body_size;
+       offset = 0;
+
+       /* Walk the IE list: *bp is the element id, *(bp+1) its length. */
+       while (bsize > offset) {
+               /* DPRINTK(4, "Element ID=%d \n",*bp); */
+               switch (*bp) {
+               case 0: /* ssid */
+                       if (*(bp + 1) <= SSID_MAX_SIZE) {
+                               ap->ssid.size = *(bp + 1);
+                       } else {
+                               DPRINTK(1, "size over :: ssid size=%d \n",
+                                       *(bp + 1));
+                               ap->ssid.size = SSID_MAX_SIZE;
+                       }
+                       memcpy(&(ap->ssid.body[0]), bp + 2, ap->ssid.size);
+                       break;
+               case 1: /* rate */
+               case 50:        /* ext rate */
+                       /* basic and extended rates accumulate into one set,
+                        * truncated at RATE_SET_MAX_SIZE */
+                       if ((*(bp + 1) + ap->rate_set.size) <=
+                           RATE_SET_MAX_SIZE) {
+                               memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+                                      bp + 2, *(bp + 1));
+                               ap->rate_set.size += *(bp + 1);
+                       } else {
+                               DPRINTK(1, "size over :: rate size=%d \n",
+                                       (*(bp + 1) + ap->rate_set.size));
+                               memcpy(&(ap->rate_set.body[ap->rate_set.size]),
+                                      bp + 2,
+                                      RATE_SET_MAX_SIZE - ap->rate_set.size);
+                               ap->rate_set.size +=
+                                   (RATE_SET_MAX_SIZE - ap->rate_set.size);
+                       }
+                       break;
+               case 3: /* DS parameter */
+                       break;
+               case 48:        /* RSN(WPA2) */
+                       ap->rsn_ie.id = *bp;
+                       if (*(bp + 1) <= RSN_IE_BODY_MAX) {
+                               ap->rsn_ie.size = *(bp + 1);
+                       } else {
+                               DPRINTK(1, "size over :: rsn size=%d \n",
+                                       *(bp + 1));
+                               ap->rsn_ie.size = RSN_IE_BODY_MAX;
+                       }
+                       memcpy(&(ap->rsn_ie.body[0]), bp + 2, ap->rsn_ie.size);
+                       break;
+               case 221:       /* WPA */
+                       if (!memcmp(bp + 2, "\x00\x50\xf2\x01", 4)) {   /* WPA OUI check */
+                               ap->wpa_ie.id = *bp;
+                               if (*(bp + 1) <= RSN_IE_BODY_MAX) {
+                                       ap->wpa_ie.size = *(bp + 1);
+                               } else {
+                                       DPRINTK(1,
+                                               "size over :: wpa size=%d \n",
+                                               *(bp + 1));
+                                       ap->wpa_ie.size = RSN_IE_BODY_MAX;
+                               }
+                               memcpy(&(ap->wpa_ie.body[0]), bp + 2,
+                                      ap->wpa_ie.size);
+                       }
+                       break;
+
+               case 2: /* FH parameter */
+               case 4: /* CF parameter */
+               case 5: /* TIM */
+               case 6: /* IBSS parameter */
+               case 7: /* Country */
+               case 42:        /* ERP information */
+               case 47:        /* Reserve ID 47 Broadcom AP */
+                       break;
+               default:
+                       DPRINTK(4, "unknown Element ID=%d \n", *bp);
+                       break;
+               }
+               offset += 2;    /* id & size field */
+               offset += *(bp + 1);    /* +size offset */
+               bp += (*(bp + 1) + 2);  /* pointer update */
+       }
+
+       return rc;
+}
+
+static
+void hostif_data_indication(struct ks_wlan_private *priv)
+{
+       unsigned int rx_ind_size;       /* indicate data size */
+       struct sk_buff *skb;
+       unsigned short auth_type;
+       unsigned char temp[256];
+
+       unsigned char RecvMIC[8];
+       char buf[128];
+       struct ether_hdr *eth_hdr;
+       unsigned short eth_proto;
+       unsigned long now;
+       struct mic_failure_t *mic_failure;
+       struct ieee802_1x_hdr *aa1x_hdr;
+       struct wpa_eapol_key *eap_key;
+       struct michel_mic_t michel_mic;
+       union iwreq_data wrqu;
+
+       DPRINTK(3, "\n");
+
+       /* min length check */
+       if (priv->rx_size <= ETH_HLEN) {
+               DPRINTK(3, "rx_size = %d\n", priv->rx_size);
+               priv->nstats.rx_errors++;
+               return;
+       }
+
+       auth_type = get_WORD(priv);     /* AuthType */
+       get_WORD(priv); /* Reserve Area */
+
+       eth_hdr = (struct ether_hdr *)(priv->rxp);
+       eth_proto = ntohs(eth_hdr->h_proto);
+       DPRINTK(3, "ether protocol = %04X\n", eth_proto);
+
+       /* source address check */
+       if (!memcmp(&priv->eth_addr[0], eth_hdr->h_source, ETH_ALEN)) {
+               DPRINTK(1, "invalid : source is own mac address !!\n");
+               DPRINTK(1,
+                       "eth_hdrernet->h_dest=%02X:%02X:%02X:%02X:%02X:%02X\n",
+                       eth_hdr->h_source[0], eth_hdr->h_source[1],
+                       eth_hdr->h_source[2], eth_hdr->h_source[3],
+                       eth_hdr->h_source[4], eth_hdr->h_source[5]);
+               priv->nstats.rx_errors++;
+               return;
+       }
+
+       /*  for WPA */
+       if (auth_type != TYPE_DATA && priv->wpa.rsn_enabled) {
+               if (memcmp(&eth_hdr->h_source[0], &priv->eth_addr[0], ETH_ALEN)) {      /* source address check */
+                       if (eth_hdr->h_dest_snap != eth_hdr->h_source_snap) {
+                               DPRINTK(1, "invalid data format\n");
+                               priv->nstats.rx_errors++;
+                               return;
+                       }
+                       if (((auth_type == TYPE_PMK1
+                             && priv->wpa.pairwise_suite ==
+                             IW_AUTH_CIPHER_TKIP) || (auth_type == TYPE_GMK1
+                                                      && priv->wpa.
+                                                      group_suite ==
+                                                      IW_AUTH_CIPHER_TKIP)
+                            || (auth_type == TYPE_GMK2
+                                && priv->wpa.group_suite ==
+                                IW_AUTH_CIPHER_TKIP))
+                           && priv->wpa.key[auth_type - 1].key_len) {
+                               DPRINTK(4, "TKIP: protocol=%04X: size=%u\n",
+                                       eth_proto, priv->rx_size);
+                               /* MIC save */
+                               memcpy(&RecvMIC[0],
+                                      (priv->rxp) + ((priv->rx_size) - 8), 8);
+                               priv->rx_size = priv->rx_size - 8;
+                               if (auth_type > 0 && auth_type < 4) {   /* auth_type check */
+                                       MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[auth_type - 1].rx_mic_key, (uint8_t *) priv->rxp, (int)priv->rx_size, (uint8_t) 0,    /* priority */
+                                                          (uint8_t *)
+                                                          michel_mic.Result);
+                               }
+                               if (memcmp(michel_mic.Result, RecvMIC, 8)) {
+                                       now = jiffies;
+                                       mic_failure = &priv->wpa.mic_failure;
+                                       /* MIC FAILURE */
+                                       if (mic_failure->last_failure_time &&
+                                           (now -
+                                            mic_failure->last_failure_time) /
+                                           HZ >= 60) {
+                                               mic_failure->failure = 0;
+                                       }
+                                       DPRINTK(4, "MIC FAILURE \n");
+                                       if (mic_failure->failure == 0) {
+                                               mic_failure->failure = 1;
+                                               mic_failure->counter = 0;
+                                       } else if (mic_failure->failure == 1) {
+                                               mic_failure->failure = 2;
+                                               mic_failure->counter =
+                                                   (uint16_t) ((now -
+                                                                mic_failure->
+                                                                last_failure_time)
+                                                               / HZ);
+                                               if (!mic_failure->counter)      /* mic_failure counter value range 1-60 */
+                                                       mic_failure->counter =
+                                                           1;
+                                       }
+                                       priv->wpa.mic_failure.
+                                           last_failure_time = now;
+                                       /*  needed parameters: count, keyid, key type, TSC */
+                                       sprintf(buf,
+                                               "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
+                                               "%02x:%02x:%02x:%02x:%02x:%02x)",
+                                               auth_type - 1,
+                                               eth_hdr->
+                                               h_dest[0] & 0x01 ? "broad" :
+                                               "uni", eth_hdr->h_source[0],
+                                               eth_hdr->h_source[1],
+                                               eth_hdr->h_source[2],
+                                               eth_hdr->h_source[3],
+                                               eth_hdr->h_source[4],
+                                               eth_hdr->h_source[5]);
+                                       memset(&wrqu, 0, sizeof(wrqu));
+                                       wrqu.data.length = strlen(buf);
+                                       DPRINTK(4,
+                                               "IWEVENT:MICHAELMICFAILURE\n");
+                                       wireless_send_event(priv->net_dev,
+                                                           IWEVCUSTOM, &wrqu,
+                                                           buf);
+                                       return;
+                               }
+                       }
+               }
+       }
+
+       if ((priv->connect_status & FORCE_DISCONNECT) ||
+           priv->wpa.mic_failure.failure == 2) {
+               return;
+       }
+
+       /* check 13th byte at rx data */
+       switch (*(priv->rxp + 12)) {
+       case 0xAA:      /* SNAP */
+               rx_ind_size = priv->rx_size - 6;
+               skb = dev_alloc_skb(rx_ind_size);
+               DPRINTK(4, "SNAP, rx_ind_size = %d\n", rx_ind_size);
+
+               if (skb) {
+                       memcpy(skb_put(skb, 12), priv->rxp, 12);        /* 8802/FDDI MAC copy */
+                       /* (SNAP+UI..) skip */
+                       memcpy(skb_put(skb, rx_ind_size - 12), priv->rxp + 18, rx_ind_size - 12);       /* copy after Type */
+
+                       aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 20);
+                       if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY
+                           && priv->wpa.rsn_enabled) {
+                               eap_key =
+                                   (struct wpa_eapol_key *)(aa1x_hdr + 1);
+                               atomic_set(&priv->psstatus.snooze_guard, 1);
+                       }
+
+                       /* rx indication */
+                       skb->dev = priv->net_dev;
+                       skb->protocol = eth_type_trans(skb, skb->dev);
+                       priv->nstats.rx_packets++;
+                       priv->nstats.rx_bytes += rx_ind_size;
+                       skb->dev->last_rx = jiffies;
+                       netif_rx(skb);
+               } else {
+                       printk(KERN_WARNING
+                              "%s: Memory squeeze, dropping packet.\n",
+                              skb->dev->name);
+                       priv->nstats.rx_dropped++;
+               }
+               break;
+       case 0xF0:      /* NETBEUI/NetBIOS */
+               rx_ind_size = (priv->rx_size + 2);
+               skb = dev_alloc_skb(rx_ind_size);
+               DPRINTK(3, "NETBEUI/NetBIOS rx_ind_size=%d\n", rx_ind_size);
+
+               if (skb) {
+                       memcpy(skb_put(skb, 12), priv->rxp, 12);        /* 8802/FDDI MAC copy */
+
+                       temp[0] = (((rx_ind_size - 12) >> 8) & 0xff);   /* NETBEUI size add */
+                       temp[1] = ((rx_ind_size - 12) & 0xff);
+                       memcpy(skb_put(skb, 2), temp, 2);
+
+                       memcpy(skb_put(skb, rx_ind_size - 14), priv->rxp + 12, rx_ind_size - 14);       /* copy after Type */
+
+                       aa1x_hdr = (struct ieee802_1x_hdr *)(priv->rxp + 14);
+                       if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY
+                           && priv->wpa.rsn_enabled) {
+                               eap_key =
+                                   (struct wpa_eapol_key *)(aa1x_hdr + 1);
+                               atomic_set(&priv->psstatus.snooze_guard, 1);
+                       }
+
+                       /* rx indication */
+                       skb->dev = priv->net_dev;
+                       skb->protocol = eth_type_trans(skb, skb->dev);
+                       priv->nstats.rx_packets++;
+                       priv->nstats.rx_bytes += rx_ind_size;
+                       skb->dev->last_rx = jiffies;
+                       netif_rx(skb);
+               } else {
+                       printk(KERN_WARNING
+                              "%s: Memory squeeze, dropping packet.\n",
+                              skb->dev->name);
+                       priv->nstats.rx_dropped++;
+               }
+               break;
+       default:        /* other rx data */
+               DPRINTK(2, "invalid data format\n");
+               priv->nstats.rx_errors++;
+       }
+}
+
/*
 * hostif_mib_get_confirm - handle a MIB-get confirm from the firmware
 *
 * Parses the confirm header, then dispatches on the MIB attribute:
 * stores the device MAC address, firmware version string, gain table
 * or EEPROM checksum result into priv, notifying the SME / waiters
 * where the original code did so.  The get_*() calls consume bytes
 * from priv->rxp in sequence, so their order is the wire format.
 */
static
void hostif_mib_get_confirm(struct ks_wlan_private *priv)
{
	struct net_device *dev = priv->net_dev;
	uint32_t mib_status;
	uint32_t mib_attribute;
	uint16_t mib_val_size;	/* parsed to advance rxp; value unused */
	uint16_t mib_val_type;	/* parsed to advance rxp; value unused */

	DPRINTK(3, "\n");

	mib_status = get_DWORD(priv);	/* MIB status */
	mib_attribute = get_DWORD(priv);	/* MIB attribute */
	mib_val_size = get_WORD(priv);	/* MIB value size */
	mib_val_type = get_WORD(priv);	/* MIB value type */

	if (mib_status != 0) {
		/* in case of error */
		DPRINTK(1, "attribute=%08X, status=%08X\n", mib_attribute,
			mib_status);
		return;
	}

	switch (mib_attribute) {
	case DOT11_MAC_ADDRESS:
		/* MAC address */
		DPRINTK(3, " mib_attribute=DOT11_MAC_ADDRESS\n");
		hostif_sme_enqueue(priv, SME_GET_MAC_ADDRESS);
		memcpy(priv->eth_addr, priv->rxp, ETH_ALEN);
		priv->mac_address_valid = 1;
		dev->dev_addr[0] = priv->eth_addr[0];
		dev->dev_addr[1] = priv->eth_addr[1];
		dev->dev_addr[2] = priv->eth_addr[2];
		dev->dev_addr[3] = priv->eth_addr[3];
		dev->dev_addr[4] = priv->eth_addr[4];
		dev->dev_addr[5] = priv->eth_addr[5];
		dev->dev_addr[6] = 0x00;
		dev->dev_addr[7] = 0x00;
		printk(KERN_INFO
		       "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
		       priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
		       priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
		break;
	case DOT11_PRODUCT_VERSION:
		/* firmware version */
		DPRINTK(3, " mib_attribute=DOT11_PRODUCT_VERSION\n");
		priv->version_size = priv->rx_size;
		/* NOTE(review): assumes rx_size < sizeof(firmware_version);
		 * no bound check here - confirm against the buffer size */
		memcpy(priv->firmware_version, priv->rxp, priv->rx_size);
		priv->firmware_version[priv->rx_size] = '\0';
		printk(KERN_INFO "ks_wlan: firmware ver. = %s\n",
		       priv->firmware_version);
		hostif_sme_enqueue(priv, SME_GET_PRODUCT_VERSION);
		/* wake_up_interruptible_all(&priv->confirm_wait); */
		complete(&priv->confirm_wait);
		break;
	case LOCAL_GAIN:
		memcpy(&priv->gain, priv->rxp, sizeof(priv->gain));
		DPRINTK(3, "TxMode=%d, RxMode=%d, TxGain=%d, RxGain=%d\n",
			priv->gain.TxMode, priv->gain.RxMode, priv->gain.TxGain,
			priv->gain.RxGain);
		break;
	case LOCAL_EEPROM_SUM:
		/* type 0: no checksum; type 1: result 0 = bad, 1 = good */
		memcpy(&priv->eeprom_sum, priv->rxp, sizeof(priv->eeprom_sum));
		DPRINTK(1, "eeprom_sum.type=%x, eeprom_sum.result=%x\n",
			priv->eeprom_sum.type, priv->eeprom_sum.result);
		if (priv->eeprom_sum.type == 0) {
			priv->eeprom_checksum = EEPROM_CHECKSUM_NONE;
		} else if (priv->eeprom_sum.type == 1) {
			if (priv->eeprom_sum.result == 0) {
				priv->eeprom_checksum = EEPROM_NG;
				printk("LOCAL_EEPROM_SUM NG\n");
			} else if (priv->eeprom_sum.result == 1) {
				priv->eeprom_checksum = EEPROM_OK;
			}
		} else {
			printk("LOCAL_EEPROM_SUM error!\n");
		}
		break;
	default:
		DPRINTK(1, "mib_attribute=%08x\n", (unsigned int)mib_attribute);
		break;
	}
}
+
+static
+void hostif_mib_set_confirm(struct ks_wlan_private *priv)
+{
+       uint32_t mib_status;    /* +04 MIB Status */
+       uint32_t mib_attribute; /* +08 MIB attribute */
+
+       DPRINTK(3, "\n");
+
+       mib_status = get_DWORD(priv);   /* MIB Status */
+       mib_attribute = get_DWORD(priv);        /* MIB attribute */
+
+       if (mib_status != 0) {
+               /* in case of error */
+               DPRINTK(1, "error :: attribute=%08X, status=%08X\n",
+                       mib_attribute, mib_status);
+       }
+
+       switch (mib_attribute) {
+       case DOT11_RTS_THRESHOLD:
+               hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_CONFIRM);
+               break;
+       case DOT11_FRAGMENTATION_THRESHOLD:
+               hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_CONFIRM);
+               break;
+       case DOT11_WEP_DEFAULT_KEY_ID:
+               if (!priv->wpa.wpa_enabled)
+                       hostif_sme_enqueue(priv, SME_WEP_INDEX_CONFIRM);
+               break;
+       case DOT11_WEP_DEFAULT_KEY_VALUE1:
+               DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE1:mib_status=%d\n",
+                       (int)mib_status);
+               if (priv->wpa.rsn_enabled)
+                       hostif_sme_enqueue(priv, SME_SET_PMK_TSC);
+               else
+                       hostif_sme_enqueue(priv, SME_WEP_KEY1_CONFIRM);
+               break;
+       case DOT11_WEP_DEFAULT_KEY_VALUE2:
+               DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE2:mib_status=%d\n",
+                       (int)mib_status);
+               if (priv->wpa.rsn_enabled)
+                       hostif_sme_enqueue(priv, SME_SET_GMK1_TSC);
+               else
+                       hostif_sme_enqueue(priv, SME_WEP_KEY2_CONFIRM);
+               break;
+       case DOT11_WEP_DEFAULT_KEY_VALUE3:
+               DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE3:mib_status=%d\n",
+                       (int)mib_status);
+               if (priv->wpa.rsn_enabled)
+                       hostif_sme_enqueue(priv, SME_SET_GMK2_TSC);
+               else
+                       hostif_sme_enqueue(priv, SME_WEP_KEY3_CONFIRM);
+               break;
+       case DOT11_WEP_DEFAULT_KEY_VALUE4:
+               DPRINTK(2, "DOT11_WEP_DEFAULT_KEY_VALUE4:mib_status=%d\n",
+                       (int)mib_status);
+               if (!priv->wpa.rsn_enabled)
+                       hostif_sme_enqueue(priv, SME_WEP_KEY4_CONFIRM);
+               break;
+       case DOT11_PRIVACY_INVOKED:
+               if (!priv->wpa.rsn_enabled)
+                       hostif_sme_enqueue(priv, SME_WEP_FLAG_CONFIRM);
+               break;
+       case DOT11_RSN_ENABLED:
+               DPRINTK(2, "DOT11_RSN_ENABLED:mib_status=%d\n",
+                       (int)mib_status);
+               hostif_sme_enqueue(priv, SME_RSN_ENABLED_CONFIRM);
+               break;
+       case LOCAL_RSN_MODE:
+               hostif_sme_enqueue(priv, SME_RSN_MODE_CONFIRM);
+               break;
+       case LOCAL_MULTICAST_ADDRESS:
+               hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
+               break;
+       case LOCAL_MULTICAST_FILTER:
+               hostif_sme_enqueue(priv, SME_MULTICAST_CONFIRM);
+               break;
+       case LOCAL_CURRENTADDRESS:
+               priv->mac_address_valid = 1;
+               break;
+       case DOT11_RSN_CONFIG_MULTICAST_CIPHER:
+               DPRINTK(2, "DOT11_RSN_CONFIG_MULTICAST_CIPHER:mib_status=%d\n",
+                       (int)mib_status);
+               hostif_sme_enqueue(priv, SME_RSN_MCAST_CONFIRM);
+               break;
+       case DOT11_RSN_CONFIG_UNICAST_CIPHER:
+               DPRINTK(2, "DOT11_RSN_CONFIG_UNICAST_CIPHER:mib_status=%d\n",
+                       (int)mib_status);
+               hostif_sme_enqueue(priv, SME_RSN_UCAST_CONFIRM);
+               break;
+       case DOT11_RSN_CONFIG_AUTH_SUITE:
+               DPRINTK(2, "DOT11_RSN_CONFIG_AUTH_SUITE:mib_status=%d\n",
+                       (int)mib_status);
+               hostif_sme_enqueue(priv, SME_RSN_AUTH_CONFIRM);
+               break;
+       case DOT11_PMK_TSC:
+               DPRINTK(2, "DOT11_PMK_TSC:mib_status=%d\n", (int)mib_status);
+               break;
+       case DOT11_GMK1_TSC:
+               DPRINTK(2, "DOT11_GMK1_TSC:mib_status=%d\n", (int)mib_status);
+               if (atomic_read(&priv->psstatus.snooze_guard)) {
+                       atomic_set(&priv->psstatus.snooze_guard, 0);
+               }
+               break;
+       case DOT11_GMK2_TSC:
+               DPRINTK(2, "DOT11_GMK2_TSC:mib_status=%d\n", (int)mib_status);
+               if (atomic_read(&priv->psstatus.snooze_guard)) {
+                       atomic_set(&priv->psstatus.snooze_guard, 0);
+               }
+               break;
+       case LOCAL_PMK:
+               DPRINTK(2, "LOCAL_PMK:mib_status=%d\n", (int)mib_status);
+               break;
+       case LOCAL_GAIN:
+               DPRINTK(2, "LOCAL_GAIN:mib_status=%d\n", (int)mib_status);
+               break;
+#ifdef WPS
+       case LOCAL_WPS_ENABLE:
+               DPRINTK(2, "LOCAL_WPS_ENABLE:mib_status=%d\n", (int)mib_status);
+               break;
+       case LOCAL_WPS_PROBE_REQ:
+               DPRINTK(2, "LOCAL_WPS_PROBE_REQ:mib_status=%d\n",
+                       (int)mib_status);
+               break;
+#endif /* WPS */
+       case LOCAL_REGION:
+               DPRINTK(2, "LOCAL_REGION:mib_status=%d\n", (int)mib_status);
+       default:
+               break;
+       }
+}
+
+static
+void hostif_power_mngmt_confirm(struct ks_wlan_private *priv)
+{
+       DPRINTK(3, "\n");
+
+       if (priv->reg.powermgt > POWMGT_ACTIVE_MODE &&
+           priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+               atomic_set(&priv->psstatus.confirm_wait, 0);
+               priv->dev_state = DEVICE_STATE_SLEEP;
+               ks_wlan_hw_power_save(priv);
+       } else {
+               priv->dev_state = DEVICE_STATE_READY;
+       }
+
+}
+
/*
 * hostif_sleep_confirm - firmware acknowledged our sleep request
 *
 * Flags a doze request and kicks the SDIO read/write workqueue (one
 * jiffy delay) so the worker performs the actual sleep transition.
 */
static
void hostif_sleep_confirm(struct ks_wlan_private *priv)
{
	DPRINTK(3, "\n");

	atomic_set(&priv->sleepstatus.doze_request, 1);
	queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
			   &priv->ks_wlan_hw.rw_wq, 1);
}
+
/*
 * hostif_start_confirm - handle a start confirm from the firmware
 *
 * With WPS compiled in: if the driver still believes it is connected,
 * emits an SIOCGIWAP wireless event with a zeroed BSSID so userspace
 * sees a disconnect before the (re)start.  Always forwards
 * SME_START_CONFIRM to the SME state machine.
 */
static
void hostif_start_confirm(struct ks_wlan_private *priv)
{
#ifdef  WPS
	union iwreq_data wrqu;
	wrqu.data.length = 0;
	wrqu.data.flags = 0;
	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
	if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
		/* all-zero BSSID == "not associated" for wireless extensions */
		memset(wrqu.ap_addr.sa_data, '\0', ETH_ALEN);
		DPRINTK(3, "IWEVENT: disconnect\n");
		wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
	}
#endif
	DPRINTK(3, " scan_ind_count=%d\n", priv->scan_ind_count);
	hostif_sme_enqueue(priv, SME_START_CONFIRM);
}
+
+static
+void hostif_connect_indication(struct ks_wlan_private *priv)
+{
+       unsigned short connect_code;
+       unsigned int tmp = 0;
+       unsigned int old_status = priv->connect_status;
+       struct net_device *netdev = priv->net_dev;
+       union iwreq_data wrqu0;
+       connect_code = get_WORD(priv);
+
+       switch (connect_code) {
+       case RESULT_CONNECT:    /* connect */
+               DPRINTK(3, "connect :: scan_ind_count=%d\n",
+                       priv->scan_ind_count);
+               if (!(priv->connect_status & FORCE_DISCONNECT))
+                       netif_carrier_on(netdev);
+               tmp = FORCE_DISCONNECT & priv->connect_status;
+               priv->connect_status = tmp + CONNECT_STATUS;
+               break;
+       case RESULT_DISCONNECT: /* disconnect */
+               DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
+                       priv->scan_ind_count);
+               netif_carrier_off(netdev);
+               tmp = FORCE_DISCONNECT & priv->connect_status;
+               priv->connect_status = tmp + DISCONNECT_STATUS;
+               break;
+       default:
+               DPRINTK(1, "unknown connect_code=%d :: scan_ind_count=%d\n",
+                       connect_code, priv->scan_ind_count);
+               netif_carrier_off(netdev);
+               tmp = FORCE_DISCONNECT & priv->connect_status;
+               priv->connect_status = tmp + DISCONNECT_STATUS;
+               break;
+       }
+
+       get_current_ap(priv, (struct link_ap_info_t *)priv->rxp);
+       if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS &&
+           (old_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS) {
+               /* for power save */
+               atomic_set(&priv->psstatus.snooze_guard, 0);
+               atomic_set(&priv->psstatus.confirm_wait, 0);
+       }
+       ks_wlan_do_power_save(priv);
+
+       wrqu0.data.length = 0;
+       wrqu0.data.flags = 0;
+       wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
+       if ((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS &&
+           (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+               memset(wrqu0.ap_addr.sa_data, '\0', ETH_ALEN);
+               DPRINTK(3, "IWEVENT: disconnect\n");
+               DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
+                       priv->scan_ind_count);
+               wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
+       }
+       priv->scan_ind_count = 0;
+}
+
/*
 * hostif_scan_indication - handle one scan result from the firmware
 *
 * Deduplicates by BSSID against the current AP list: a known BSSID
 * only has its record refreshed (and only from a probe response).
 * Unknown BSSIDs are appended until LOCAL_APLIST_MAX entries, with
 * scan_ind_count tracking the list size.
 */
static
void hostif_scan_indication(struct ks_wlan_private *priv)
{
	int i;
	struct ap_info_t *ap_info;

	DPRINTK(3, "scan_ind_count = %d\n", priv->scan_ind_count);
	ap_info = (struct ap_info_t *)(priv->rxp);

	if (priv->scan_ind_count != 0) {
		for (i = 0; i < priv->aplist.size; i++) {	/* bssid check */
			if (!memcmp
			    (&(ap_info->bssid[0]),
			     &(priv->aplist.ap[i].bssid[0]), ETH_ALEN)) {
				/* already listed: refresh from probe
				 * responses only, then stop */
				if (ap_info->frame_type ==
				    FRAME_TYPE_PROBE_RESP)
					get_ap_information(priv, ap_info,
							   &(priv->aplist.
							     ap[i]));
				return;
			}
		}
	}
	priv->scan_ind_count++;
	if (priv->scan_ind_count < LOCAL_APLIST_MAX + 1) {
		DPRINTK(4, " scan_ind_count=%d :: aplist.size=%d\n",
			priv->scan_ind_count, priv->aplist.size);
		/* append as a new entry at index scan_ind_count - 1 */
		get_ap_information(priv, (struct ap_info_t *)(priv->rxp),
				   &(priv->aplist.
				     ap[priv->scan_ind_count - 1]));
		priv->aplist.size = priv->scan_ind_count;
	} else {
		/* list full: drop the result, keep counting for the log */
		DPRINTK(4, " count over :: scan_ind_count=%d\n",
			priv->scan_ind_count);
	}

}
+
/*
 * hostif_stop_confirm - handle a stop confirm from the firmware
 *
 * Wakes the device out of the sleep state if needed; if the driver
 * was connected, marks it disconnected (keeping the FORCE_DISCONNECT
 * flag bits) and emits the SIOCGIWAP "disconnected" wireless event.
 * Always forwards SME_STOP_CONFIRM to the SME state machine.
 */
static
void hostif_stop_confirm(struct ks_wlan_private *priv)
{
	unsigned int tmp = 0;
	unsigned int old_status = priv->connect_status;
	struct net_device *netdev = priv->net_dev;
	union iwreq_data wrqu0;

	DPRINTK(3, "\n");
	if (priv->dev_state == DEVICE_STATE_SLEEP)
		priv->dev_state = DEVICE_STATE_READY;

	/* disconnect indication */
	if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
		netif_carrier_off(netdev);
		tmp = FORCE_DISCONNECT & priv->connect_status;
		priv->connect_status = tmp | DISCONNECT_STATUS;
		/* NOTE(review): bare printk without a KERN_ level, duplicated
		 * below - looks like a debugging leftover; candidate for
		 * removal or DPRINTK */
		printk("IWEVENT: disconnect\n");

		wrqu0.data.length = 0;
		wrqu0.data.flags = 0;
		wrqu0.ap_addr.sa_family = ARPHRD_ETHER;
		/* NOTE(review): the first half of this condition is always
		 * true - the status was just set to DISCONNECT_STATUS above */
		if ((priv->connect_status & CONNECT_STATUS_MASK) ==
		    DISCONNECT_STATUS
		    && (old_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
			/* all-zero BSSID == "not associated" */
			memset(wrqu0.ap_addr.sa_data, '\0', ETH_ALEN);
			DPRINTK(3, "IWEVENT: disconnect\n");
			printk("IWEVENT: disconnect\n");
			DPRINTK(3, "disconnect :: scan_ind_count=%d\n",
				priv->scan_ind_count);
			wireless_send_event(netdev, SIOCGIWAP, &wrqu0, NULL);
		}
		priv->scan_ind_count = 0;
	}

	hostif_sme_enqueue(priv, SME_STOP_CONFIRM);
}
+
/*
 * hostif_ps_adhoc_set_confirm - pseudo-adhoc mode-set confirm
 *
 * Clears the infrastructure flag and notifies the SME that the mode
 * change completed.
 */
static
void hostif_ps_adhoc_set_confirm(struct ks_wlan_private *priv)
{
	DPRINTK(3, "\n");
	priv->infra_status = 0;	/* infrastructure mode cancel */
	hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);

}
+
+static
+void hostif_infrastructure_set_confirm(struct ks_wlan_private *priv)
+{
+       uint16_t result_code;
+       DPRINTK(3, "\n");
+       result_code = get_WORD(priv);
+       DPRINTK(3, "result code = %d\n", result_code);
+       priv->infra_status = 1; /* infrastructure mode set */
+       hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
+}
+
/*
 * hostif_adhoc_set_confirm - ad-hoc mode-set confirm
 *
 * Sets the same flag as the infrastructure path and notifies the SME
 * that the mode change completed.
 */
static
void hostif_adhoc_set_confirm(struct ks_wlan_private *priv)
{
	DPRINTK(3, "\n");
	priv->infra_status = 1;	/* NOTE(review): original comment said
				 * "infrastructure mode set" but this is the
				 * ad-hoc confirm; infra_status appears to mean
				 * "mode set" generally - confirm */
	hostif_sme_enqueue(priv, SME_MODE_SET_CONFIRM);
}
+
/*
 * hostif_associate_indication - report association IEs to userspace
 *
 * Formats the (re)association request and response information
 * elements as an "ASSOCINFO(ReqIEs=... RespIEs=...)" hex string and
 * sends it as an IWEVCUSTOM wireless event (consumed by e.g.
 * wpa_supplicant).
 *
 * NOTE(review): buf is IW_CUSTOM_MAX bytes but the two sprintf loops
 * are bounded only by reqIEs_size/respIEs_size from the device; large
 * IE blobs could overflow buf - confirm the firmware limits these
 * sizes, or add an explicit bound.
 */
static
void hostif_associate_indication(struct ks_wlan_private *priv)
{
	struct association_request_t *assoc_req;
	struct association_response_t *assoc_resp;
	unsigned char *pb;
	union iwreq_data wrqu;
	char buf[IW_CUSTOM_MAX];
	char *pbuf = &buf[0];
	int i;

	static const char associnfo_leader0[] = "ASSOCINFO(ReqIEs=";
	static const char associnfo_leader1[] = " RespIEs=";

	DPRINTK(3, "\n");
	/* payload layout: request struct, response struct, then raw IEs */
	assoc_req = (struct association_request_t *)(priv->rxp);
	assoc_resp = (struct association_response_t *)(assoc_req + 1);
	pb = (unsigned char *)(assoc_resp + 1);

	memset(&wrqu, 0, sizeof(wrqu));
	/* sizeof - 1 drops the NUL terminator of the literal */
	memcpy(pbuf, associnfo_leader0, sizeof(associnfo_leader0) - 1);
	wrqu.data.length += sizeof(associnfo_leader0) - 1;
	pbuf += sizeof(associnfo_leader0) - 1;

	/* request IEs as hex: two output chars per byte */
	for (i = 0; i < assoc_req->reqIEs_size; i++)
		pbuf += sprintf(pbuf, "%02x", *(pb + i));
	wrqu.data.length += (assoc_req->reqIEs_size) * 2;

	memcpy(pbuf, associnfo_leader1, sizeof(associnfo_leader1) - 1);
	wrqu.data.length += sizeof(associnfo_leader1) - 1;
	pbuf += sizeof(associnfo_leader1) - 1;

	/* response IEs follow the request IEs in the buffer */
	pb += assoc_req->reqIEs_size;
	for (i = 0; i < assoc_resp->respIEs_size; i++)
		pbuf += sprintf(pbuf, "%02x", *(pb + i));
	wrqu.data.length += (assoc_resp->respIEs_size) * 2;

	pbuf += sprintf(pbuf, ")");
	wrqu.data.length += 1;

	DPRINTK(3, "IWEVENT:ASSOCINFO\n");
	wireless_send_event(priv->net_dev, IWEVCUSTOM, &wrqu, buf);
}
+
+static
+void hostif_bss_scan_confirm(struct ks_wlan_private *priv)
+{
+       unsigned int result_code;
+       struct net_device *dev = priv->net_dev;
+       union iwreq_data wrqu;
+       result_code = get_DWORD(priv);
+       DPRINTK(2, "result=%d :: scan_ind_count=%d\n", result_code,
+               priv->scan_ind_count);
+
+       priv->sme_i.sme_flag &= ~SME_AP_SCAN;
+       hostif_sme_enqueue(priv, SME_BSS_SCAN_CONFIRM);
+
+       wrqu.data.length = 0;
+       wrqu.data.flags = 0;
+       DPRINTK(3, "IWEVENT: SCAN CONFIRM\n");
+       wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
+       priv->scan_ind_count = 0;
+}
+
/*
 * hostif_phy_information_confirm - handle a PHY information confirm
 *
 * Reads the PHY report fields from the receive buffer (the get_*()
 * call order IS the wire format), updates the cached link rate and
 * the wireless-extensions statistics, then completes confirm_wait so
 * a blocked ioctl path can proceed.
 */
static
void hostif_phy_information_confirm(struct ks_wlan_private *priv)
{
	struct iw_statistics *wstats = &priv->wstats;
	unsigned char rssi, signal, noise;
	unsigned char LinkSpeed;
	unsigned int TransmittedFrameCount, ReceivedFragmentCount;
	unsigned int FailedCount, FCSErrorCount;

	DPRINTK(3, "\n");
	rssi = get_BYTE(priv);
	signal = get_BYTE(priv);
	noise = get_BYTE(priv);		/* read to advance rxp; not reported */
	LinkSpeed = get_BYTE(priv);
	TransmittedFrameCount = get_DWORD(priv);
	ReceivedFragmentCount = get_DWORD(priv);
	FailedCount = get_DWORD(priv);
	FCSErrorCount = get_DWORD(priv);

	DPRINTK(4, "phyinfo confirm rssi=%d signal=%d\n", rssi, signal);
	priv->current_rate = (LinkSpeed & RATE_MASK);
	wstats->qual.qual = signal;
	wstats->qual.level = 256 - rssi;	/* convert to a dBm-style level */
	wstats->qual.noise = 0;	/* invalid noise value */
	wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;

	DPRINTK(3, "\n    rssi=%u\n    signal=%u\n    LinkSpeed=%ux500Kbps\n \
   TransmittedFrameCount=%u\n    ReceivedFragmentCount=%u\n    FailedCount=%u\n \
   FCSErrorCount=%u\n", rssi, signal, LinkSpeed, TransmittedFrameCount, ReceivedFragmentCount, FailedCount, FCSErrorCount);

	/* wake_up_interruptible_all(&priv->confirm_wait); */
	complete(&priv->confirm_wait);
}
+
/*
 * hostif_mic_failure_confirm - firmware acknowledged a MIC-failure
 * report; forward the confirm to the SME state machine.
 */
static
void hostif_mic_failure_confirm(struct ks_wlan_private *priv)
{
	DPRINTK(3, "mic_failure=%u\n", priv->wpa.mic_failure.failure);
	hostif_sme_enqueue(priv, SME_MIC_FAILURE_CONFIRM);
}
+
/*
 * hostif_event_check - dispatch one event from the firmware
 *
 * Reads the event word from the receive buffer and routes it to the
 * matching handler.  Unknown events (including HIF_AP_SET_CONF) are
 * logged and complete confirm_wait so no waiter is left hanging.
 * Every event is also recorded in the hostt ring buffer for tracing.
 */
static
void hostif_event_check(struct ks_wlan_private *priv)
{
	unsigned short event;

	DPRINTK(4, "\n");
	event = get_WORD(priv);	/* get event */
	switch (event) {
	case HIF_DATA_IND:
		hostif_data_indication(priv);
		break;
	case HIF_MIB_GET_CONF:
		hostif_mib_get_confirm(priv);
		break;
	case HIF_MIB_SET_CONF:
		hostif_mib_set_confirm(priv);
		break;
	case HIF_POWERMGT_CONF:
		hostif_power_mngmt_confirm(priv);
		break;
	case HIF_SLEEP_CONF:
		hostif_sleep_confirm(priv);
		break;
	case HIF_START_CONF:
		hostif_start_confirm(priv);
		break;
	case HIF_CONNECT_IND:
		hostif_connect_indication(priv);
		break;
	case HIF_STOP_CONF:
		hostif_stop_confirm(priv);
		break;
	case HIF_PS_ADH_SET_CONF:
		hostif_ps_adhoc_set_confirm(priv);
		break;
	case HIF_INFRA_SET_CONF:
	case HIF_INFRA_SET2_CONF:
		hostif_infrastructure_set_confirm(priv);
		break;
	case HIF_ADH_SET_CONF:
	case HIF_ADH_SET2_CONF:
		hostif_adhoc_set_confirm(priv);
		break;
	case HIF_ASSOC_INFO_IND:
		hostif_associate_indication(priv);
		break;
	case HIF_MIC_FAILURE_CONF:
		hostif_mic_failure_confirm(priv);
		break;
	case HIF_SCAN_CONF:
		hostif_bss_scan_confirm(priv);
		break;
	case HIF_PHY_INFO_CONF:
	case HIF_PHY_INFO_IND:
		hostif_phy_information_confirm(priv);
		break;
	case HIF_SCAN_IND:
		hostif_scan_indication(priv);
		break;
	case HIF_AP_SET_CONF:	/* intentionally unhandled: falls through */
	default:
		//DPRINTK(1, "undefined event[%04X]\n", event);
		/* NOTE(review): bare printk without KERN_ level */
		printk("undefined event[%04X]\n", event);
		/* wake_up_all(&priv->confirm_wait); */
		complete(&priv->confirm_wait);
		break;
	}

	/* add event to hostt buffer (fixed-size trace ring) */
	priv->hostt.buff[priv->hostt.qtail] = event;
	priv->hostt.qtail = (priv->hostt.qtail + 1) % SME_EVENT_BUFF_SIZE;
}
+
/* Round size up to the next multiple of 4. Argument fully parenthesized so
 * expression arguments (e.g. CHECK_ALINE(a + b)) expand correctly.
 */
#define CHECK_ALINE(size) (((size) % 4) ? ((size) + (4 - ((size) % 4))) : (size))
+
+int hostif_data_request(struct ks_wlan_private *priv, struct sk_buff *packet)
+{
+       unsigned int packet_len = 0;
+
+       unsigned char *buffer = NULL;
+       unsigned int length = 0;
+       struct hostif_data_request_t *pp;
+       unsigned char *p;
+       int result = 0;
+       unsigned short eth_proto;
+       struct ether_hdr *eth_hdr;
+       struct michel_mic_t michel_mic;
+       unsigned short keyinfo = 0;
+       struct ieee802_1x_hdr *aa1x_hdr;
+       struct wpa_eapol_key *eap_key;
+       struct ethhdr *eth;
+
+       packet_len = packet->len;
+       if (packet_len > ETH_FRAME_LEN) {
+               DPRINTK(1, "bad length packet_len=%d \n", packet_len);
+               dev_kfree_skb(packet);
+               return -1;
+       }
+
+       if (((priv->connect_status & CONNECT_STATUS_MASK) == DISCONNECT_STATUS)
+           || (priv->connect_status & FORCE_DISCONNECT)
+           || priv->wpa.mic_failure.stop) {
+               DPRINTK(3, " DISCONNECT\n");
+               if (netif_queue_stopped(priv->net_dev))
+                       netif_wake_queue(priv->net_dev);
+               if (packet)
+                       dev_kfree_skb(packet);
+
+               return 0;
+       }
+
+       /* for PowerSave */
+       if (atomic_read(&priv->psstatus.status) == PS_SNOOZE) { /* power save wakeup */
+               if (!netif_queue_stopped(priv->net_dev))
+                       netif_stop_queue(priv->net_dev);
+       }
+
+       DPRINTK(4, "skb_buff length=%d\n", packet_len);
+       pp = (struct hostif_data_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp) + 6 + packet_len + 8),
+                   KS_WLAN_MEM_FLAG);
+
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               dev_kfree_skb(packet);
+               return -2;
+       }
+
+       p = (unsigned char *)pp->data;
+
+       buffer = packet->data;
+       length = packet->len;
+
+       /* packet check */
+       eth = (struct ethhdr *)packet->data;
+       if (memcmp(&priv->eth_addr[0], eth->h_source, ETH_ALEN)) {
+               DPRINTK(1, "invalid mac address !!\n");
+               DPRINTK(1, "ethernet->h_source=%02X:%02X:%02X:%02X:%02X:%02X\n",
+                       eth->h_source[0], eth->h_source[1], eth->h_source[2],
+                       eth->h_source[3], eth->h_source[4], eth->h_source[5]);
+               return -3;
+       }
+
+       /* MAC address copy */
+       memcpy(p, buffer, 12);  /* DST/SRC MAC address */
+       p += 12;
+       buffer += 12;
+       length -= 12;
+       /* EtherType/Length check */
+       if (*(buffer + 1) + (*buffer << 8) > 1500) {
+               /* ProtocolEAP = *(buffer+1) + (*buffer << 8); */
+               /* DPRINTK(2, "Send [SNAP]Type %x\n",ProtocolEAP); */
+               /* SAP/CTL/OUI(6 byte) add */
+               *p++ = 0xAA;    /* DSAP */
+               *p++ = 0xAA;    /* SSAP */
+               *p++ = 0x03;    /* CTL */
+               *p++ = 0x00;    /* OUI ("000000") */
+               *p++ = 0x00;    /* OUI ("000000") */
+               *p++ = 0x00;    /* OUI ("000000") */
+               packet_len += 6;
+       } else {
+               DPRINTK(4, "DIX\n");
+               /* Length(2 byte) delete */
+               buffer += 2;
+               length -= 2;
+               packet_len -= 2;
+       }
+
+       /* pp->data copy */
+       memcpy(p, buffer, length);
+
+       p += length;
+
+       /* for WPA */
+       eth_hdr = (struct ether_hdr *)&pp->data[0];
+       eth_proto = ntohs(eth_hdr->h_proto);
+
+       /* for MIC FAILUER REPORT check */
+       if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
+           && priv->wpa.mic_failure.failure > 0) {
+               aa1x_hdr = (struct ieee802_1x_hdr *)(eth_hdr + 1);
+               if (aa1x_hdr->type == IEEE802_1X_TYPE_EAPOL_KEY) {
+                       eap_key = (struct wpa_eapol_key *)(aa1x_hdr + 1);
+                       keyinfo = ntohs(eap_key->key_info);
+               }
+       }
+
+       if (priv->wpa.rsn_enabled && priv->wpa.key[0].key_len) {
+               if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
+                   && !(priv->wpa.key[1].key_len)
+                   && !(priv->wpa.key[2].key_len)
+                   && !(priv->wpa.key[3].key_len)) {
+                       pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH);      /* no encryption */
+               } else {
+                       if (priv->wpa.pairwise_suite == IW_AUTH_CIPHER_TKIP) {
+                               MichaelMICFunction(&michel_mic, (uint8_t *) priv->wpa.key[0].tx_mic_key, (uint8_t *) & pp->data[0], (int)packet_len, (uint8_t) 0,       /* priority */
+                                                  (uint8_t *) michel_mic.
+                                                  Result);
+                               memcpy(p, michel_mic.Result, 8);
+                               length += 8;
+                               packet_len += 8;
+                               p += 8;
+                               pp->auth_type =
+                                   cpu_to_le16((uint16_t) TYPE_DATA);
+
+                       } else if (priv->wpa.pairwise_suite ==
+                                  IW_AUTH_CIPHER_CCMP) {
+                               pp->auth_type =
+                                   cpu_to_le16((uint16_t) TYPE_DATA);
+                       }
+               }
+       } else {
+               if (eth_proto == ETHER_PROTOCOL_TYPE_EAP)
+                       pp->auth_type = cpu_to_le16((uint16_t) TYPE_AUTH);
+               else
+                       pp->auth_type = cpu_to_le16((uint16_t) TYPE_DATA);
+       }
+
+       /* header value set */
+       pp->header.size =
+           cpu_to_le16((uint16_t)
+                       (sizeof(*pp) - sizeof(pp->header.size) + packet_len));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_DATA_REQ);
+
+       /* tx request */
+       result =
+           ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + packet_len),
+                         (void *)send_packet_complete, (void *)priv,
+                         (void *)packet);
+
+       /* MIC FAILUER REPORT check */
+       if (eth_proto == ETHER_PROTOCOL_TYPE_EAP
+           && priv->wpa.mic_failure.failure > 0) {
+               if (keyinfo & WPA_KEY_INFO_ERROR
+                   && keyinfo & WPA_KEY_INFO_REQUEST) {
+                       DPRINTK(3, " MIC ERROR Report SET : %04X\n", keyinfo);
+                       hostif_sme_enqueue(priv, SME_MIC_FAILURE_REQUEST);
+               }
+               if (priv->wpa.mic_failure.failure == 2)
+                       priv->wpa.mic_failure.stop = 1;
+       }
+
+       return result;
+}
+
/* Count an outstanding confirm while the device is in a power-save state
 * (status above PS_ACTIVE_SET). Argument parenthesized; do/while(0) so the
 * macro behaves as a single statement.
 */
#define ps_confirm_wait_inc(priv)					\
	do {								\
		if (atomic_read(&(priv)->psstatus.status) > PS_ACTIVE_SET) \
			atomic_inc(&(priv)->psstatus.confirm_wait);	\
	} while (0)
+
+static
+void hostif_mib_get_request(struct ks_wlan_private *priv,
+                           unsigned long mib_attribute)
+{
+       struct hostif_mib_get_request_t *pp;
+
+       DPRINTK(3, "\n");
+
+       /* make primitive */
+       pp = (struct hostif_mib_get_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_MIB_GET_REQ);
+       pp->mib_attribute = cpu_to_le32((uint32_t) mib_attribute);
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_mib_set_request(struct ks_wlan_private *priv,
+                           unsigned long mib_attribute, unsigned short size,
+                           unsigned short type, void *vp)
+{
+       struct hostif_mib_set_request_t *pp;
+
+       DPRINTK(3, "\n");
+
+       if (priv->dev_state < DEVICE_STATE_BOOT) {
+               DPRINTK(3, "DeviceRemove\n");
+               return;
+       }
+
+       /* make primitive */
+       pp = (struct hostif_mib_set_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp) + size), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+
+       pp->header.size =
+           cpu_to_le16((uint16_t)
+                       (sizeof(*pp) - sizeof(pp->header.size) + size));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_MIB_SET_REQ);
+       pp->mib_attribute = cpu_to_le32((uint32_t) mib_attribute);
+       pp->mib_value.size = cpu_to_le16((uint16_t) size);
+       pp->mib_value.type = cpu_to_le16((uint16_t) type);
+       memcpy(&pp->mib_value.body, vp, size);
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + size), NULL, NULL,
+                     NULL);
+}
+
+static
+void hostif_start_request(struct ks_wlan_private *priv, unsigned char mode)
+{
+       struct hostif_start_request_t *pp;
+
+       DPRINTK(3, "\n");
+
+       /* make primitive */
+       pp = (struct hostif_start_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_START_REQ);
+       pp->mode = cpu_to_le16((uint16_t) mode);
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+
+       priv->aplist.size = 0;
+       priv->scan_ind_count = 0;
+}
+
+static
+void hostif_ps_adhoc_set_request(struct ks_wlan_private *priv)
+{
+       struct hostif_ps_adhoc_set_request_t *pp;
+       uint16_t capability;
+
+       DPRINTK(3, "\n");
+
+       /* make primitive */
+       pp = (struct hostif_ps_adhoc_set_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       memset(pp, 0, sizeof(*pp));
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_PS_ADH_SET_REQ);
+       pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+       pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+       pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+       pp->channel = cpu_to_le16((uint16_t) (priv->reg.channel));
+       pp->rate_set.size = priv->reg.rate_set.size;
+       memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+              priv->reg.rate_set.size);
+
+       capability = 0x0000;
+       if (priv->reg.preamble == SHORT_PREAMBLE) {
+               /* short preamble */
+               capability |= BSS_CAP_SHORT_PREAMBLE;
+       }
+       capability &= ~(BSS_CAP_PBCC);  /* pbcc not support */
+       if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+               capability |= BSS_CAP_SHORT_SLOT_TIME;  /* ShortSlotTime support */
+               capability &= ~(BSS_CAP_DSSS_OFDM);     /* DSSS OFDM */
+       }
+       pp->capability = cpu_to_le16((uint16_t) capability);
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_infrastructure_set_request(struct ks_wlan_private *priv)
+{
+       struct hostif_infrastructure_set_request_t *pp;
+       uint16_t capability;
+
+       DPRINTK(3, "ssid.size=%d \n", priv->reg.ssid.size);
+
+       /* make primitive */
+       pp = (struct hostif_infrastructure_set_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_INFRA_SET_REQ);
+       pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+       pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+       pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+
+       pp->rate_set.size = priv->reg.rate_set.size;
+       memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+              priv->reg.rate_set.size);
+       pp->ssid.size = priv->reg.ssid.size;
+       memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+       capability = 0x0000;
+       if (priv->reg.preamble == SHORT_PREAMBLE) {
+               /* short preamble */
+               capability |= BSS_CAP_SHORT_PREAMBLE;
+       }
+       capability &= ~(BSS_CAP_PBCC);  /* pbcc not support */
+       if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+               capability |= BSS_CAP_SHORT_SLOT_TIME;  /* ShortSlotTime support */
+               capability &= ~(BSS_CAP_DSSS_OFDM);     /* DSSS OFDM not support */
+       }
+       pp->capability = cpu_to_le16((uint16_t) capability);
+       pp->beacon_lost_count =
+           cpu_to_le16((uint16_t) (priv->reg.beacon_lost_count));
+       pp->auth_type = cpu_to_le16((uint16_t) (priv->reg.authenticate_type));
+
+       pp->channel_list.body[0] = 1;
+       pp->channel_list.body[1] = 8;
+       pp->channel_list.body[2] = 2;
+       pp->channel_list.body[3] = 9;
+       pp->channel_list.body[4] = 3;
+       pp->channel_list.body[5] = 10;
+       pp->channel_list.body[6] = 4;
+       pp->channel_list.body[7] = 11;
+       pp->channel_list.body[8] = 5;
+       pp->channel_list.body[9] = 12;
+       pp->channel_list.body[10] = 6;
+       pp->channel_list.body[11] = 13;
+       pp->channel_list.body[12] = 7;
+       if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+               pp->channel_list.size = 13;
+       } else {
+               pp->channel_list.body[13] = 14;
+               pp->channel_list.size = 14;
+       }
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+void hostif_infrastructure_set2_request(struct ks_wlan_private *priv)
+{
+       struct hostif_infrastructure_set2_request_t *pp;
+       uint16_t capability;
+
+       DPRINTK(2, "ssid.size=%d \n", priv->reg.ssid.size);
+
+       /* make primitive */
+       pp = (struct hostif_infrastructure_set2_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_INFRA_SET2_REQ);
+       pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+       pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+       pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+
+       pp->rate_set.size = priv->reg.rate_set.size;
+       memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+              priv->reg.rate_set.size);
+       pp->ssid.size = priv->reg.ssid.size;
+       memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+       capability = 0x0000;
+       if (priv->reg.preamble == SHORT_PREAMBLE) {
+               /* short preamble */
+               capability |= BSS_CAP_SHORT_PREAMBLE;
+       }
+       capability &= ~(BSS_CAP_PBCC);  /* pbcc not support */
+       if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+               capability |= BSS_CAP_SHORT_SLOT_TIME;  /* ShortSlotTime support */
+               capability &= ~(BSS_CAP_DSSS_OFDM);     /* DSSS OFDM not support */
+       }
+       pp->capability = cpu_to_le16((uint16_t) capability);
+       pp->beacon_lost_count =
+           cpu_to_le16((uint16_t) (priv->reg.beacon_lost_count));
+       pp->auth_type = cpu_to_le16((uint16_t) (priv->reg.authenticate_type));
+
+       pp->channel_list.body[0] = 1;
+       pp->channel_list.body[1] = 8;
+       pp->channel_list.body[2] = 2;
+       pp->channel_list.body[3] = 9;
+       pp->channel_list.body[4] = 3;
+       pp->channel_list.body[5] = 10;
+       pp->channel_list.body[6] = 4;
+       pp->channel_list.body[7] = 11;
+       pp->channel_list.body[8] = 5;
+       pp->channel_list.body[9] = 12;
+       pp->channel_list.body[10] = 6;
+       pp->channel_list.body[11] = 13;
+       pp->channel_list.body[12] = 7;
+       if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+               pp->channel_list.size = 13;
+       } else {
+               pp->channel_list.body[13] = 14;
+               pp->channel_list.size = 14;
+       }
+
+       memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN);
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_adhoc_set_request(struct ks_wlan_private *priv)
+{
+       struct hostif_adhoc_set_request_t *pp;
+       uint16_t capability;
+
+       DPRINTK(3, "\n");
+
+       /* make primitive */
+       pp = (struct hostif_adhoc_set_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       memset(pp, 0, sizeof(*pp));
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_ADH_SET_REQ);
+       pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+       pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+       pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+       pp->channel = cpu_to_le16((uint16_t) (priv->reg.channel));
+       pp->rate_set.size = priv->reg.rate_set.size;
+       memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+              priv->reg.rate_set.size);
+       pp->ssid.size = priv->reg.ssid.size;
+       memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+       capability = 0x0000;
+       if (priv->reg.preamble == SHORT_PREAMBLE) {
+               /* short preamble */
+               capability |= BSS_CAP_SHORT_PREAMBLE;
+       }
+       capability &= ~(BSS_CAP_PBCC);  /* pbcc not support */
+       if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+               capability |= BSS_CAP_SHORT_SLOT_TIME;  /* ShortSlotTime support */
+               capability &= ~(BSS_CAP_DSSS_OFDM);     /* DSSS OFDM not support */
+       }
+       pp->capability = cpu_to_le16((uint16_t) capability);
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_adhoc_set2_request(struct ks_wlan_private *priv)
+{
+       struct hostif_adhoc_set2_request_t *pp;
+       uint16_t capability;
+
+       DPRINTK(3, "\n");
+
+       /* make primitive */
+       pp = (struct hostif_adhoc_set2_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       memset(pp, 0, sizeof(*pp));
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_ADH_SET_REQ);
+       pp->phy_type = cpu_to_le16((uint16_t) (priv->reg.phy_type));
+       pp->cts_mode = cpu_to_le16((uint16_t) (priv->reg.cts_mode));
+       pp->scan_type = cpu_to_le16((uint16_t) (priv->reg.scan_type));
+       pp->rate_set.size = priv->reg.rate_set.size;
+       memcpy(&pp->rate_set.body[0], &priv->reg.rate_set.body[0],
+              priv->reg.rate_set.size);
+       pp->ssid.size = priv->reg.ssid.size;
+       memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size);
+
+       capability = 0x0000;
+       if (priv->reg.preamble == SHORT_PREAMBLE) {
+               /* short preamble */
+               capability |= BSS_CAP_SHORT_PREAMBLE;
+       }
+       capability &= ~(BSS_CAP_PBCC);  /* pbcc not support */
+       if (priv->reg.phy_type != D_11B_ONLY_MODE) {
+               capability |= BSS_CAP_SHORT_SLOT_TIME;  /* ShortSlotTime support */
+               capability &= ~(BSS_CAP_DSSS_OFDM);     /* DSSS OFDM not support */
+       }
+       pp->capability = cpu_to_le16((uint16_t) capability);
+
+       pp->channel_list.body[0] = priv->reg.channel;
+       pp->channel_list.size = 1;
+       memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN);
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_stop_request(struct ks_wlan_private *priv)
+{
+       struct hostif_stop_request_t *pp;
+
+       DPRINTK(3, "\n");
+
+       /* make primitive */
+       pp = (struct hostif_stop_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_STOP_REQ);
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_phy_information_request(struct ks_wlan_private *priv)
+{
+       struct hostif_phy_information_request_t *pp;
+
+       DPRINTK(3, "\n");
+
+       /* make primitive */
+       pp = (struct hostif_phy_information_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_PHY_INFO_REQ);
+       if (priv->reg.phy_info_timer) {
+               pp->type = cpu_to_le16((uint16_t) TIME_TYPE);
+               pp->time = cpu_to_le16((uint16_t) (priv->reg.phy_info_timer));
+       } else {
+               pp->type = cpu_to_le16((uint16_t) NORMAL_TYPE);
+               pp->time = cpu_to_le16((uint16_t) 0);
+       }
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_power_mngmt_request(struct ks_wlan_private *priv,
+                               unsigned long mode, unsigned long wake_up,
+                               unsigned long receiveDTIMs)
+{
+       struct hostif_power_mngmt_request_t *pp;
+
+       DPRINTK(3, "mode=%lu wake_up=%lu receiveDTIMs=%lu\n", mode, wake_up,
+               receiveDTIMs);
+       /* make primitive */
+       pp = (struct hostif_power_mngmt_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_POWERMGT_REQ);
+       pp->mode = cpu_to_le32((uint32_t) mode);
+       pp->wake_up = cpu_to_le32((uint32_t) wake_up);
+       pp->receiveDTIMs = cpu_to_le32((uint32_t) receiveDTIMs);
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
+static
+void hostif_sleep_request(struct ks_wlan_private *priv, unsigned long mode)
+{
+       struct hostif_sleep_request_t *pp;
+
+       DPRINTK(3, "mode=%lu \n", mode);
+
+       if (mode == SLP_SLEEP) {
+               /* make primitive */
+               pp = (struct hostif_sleep_request_t *)
+                   kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+               if (pp == NULL) {
+                       DPRINTK(3, "allocate memory failed..\n");
+                       return;
+               }
+               pp->header.size =
+                   cpu_to_le16((uint16_t)
+                               (sizeof(*pp) - sizeof(pp->header.size)));
+               pp->header.event = cpu_to_le16((uint16_t) HIF_SLEEP_REQ);
+
+               /* send to device request */
+               ps_confirm_wait_inc(priv);
+               ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL,
+                             NULL);
+       } else if (mode == SLP_ACTIVE) {
+               atomic_set(&priv->sleepstatus.wakeup_request, 1);
+               queue_delayed_work(priv->ks_wlan_hw.ks7010sdio_wq,
+                                  &priv->ks_wlan_hw.rw_wq, 1);
+       } else {
+               DPRINTK(3, "invalid mode %ld \n", mode);
+               return;
+       }
+}
+
+static
+void hostif_bss_scan_request(struct ks_wlan_private *priv,
+                            unsigned long scan_type, uint8_t * scan_ssid,
+                            uint8_t scan_ssid_len)
+{
+       struct hostif_bss_scan_request_t *pp;
+
+       DPRINTK(2, "\n");
+       /* make primitive */
+       pp = (struct hostif_bss_scan_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_SCAN_REQ);
+       pp->scan_type = scan_type;
+
+       pp->ch_time_min = cpu_to_le32((uint32_t) 110);  /* default value */
+       pp->ch_time_max = cpu_to_le32((uint32_t) 130);  /* default value */
+       pp->channel_list.body[0] = 1;
+       pp->channel_list.body[1] = 8;
+       pp->channel_list.body[2] = 2;
+       pp->channel_list.body[3] = 9;
+       pp->channel_list.body[4] = 3;
+       pp->channel_list.body[5] = 10;
+       pp->channel_list.body[6] = 4;
+       pp->channel_list.body[7] = 11;
+       pp->channel_list.body[8] = 5;
+       pp->channel_list.body[9] = 12;
+       pp->channel_list.body[10] = 6;
+       pp->channel_list.body[11] = 13;
+       pp->channel_list.body[12] = 7;
+       if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+               pp->channel_list.size = 13;
+       } else {
+               pp->channel_list.body[13] = 14;
+               pp->channel_list.size = 14;
+       }
+       pp->ssid.size = 0;
+
+       /* specified SSID SCAN */
+       if (scan_ssid_len > 0 && scan_ssid_len <= 32) {
+               pp->ssid.size = scan_ssid_len;
+               memcpy(&pp->ssid.body[0], scan_ssid, scan_ssid_len);
+       }
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+
+       priv->aplist.size = 0;
+       priv->scan_ind_count = 0;
+}
+
+static
+void hostif_mic_failure_request(struct ks_wlan_private *priv,
+                               unsigned short failure_count,
+                               unsigned short timer)
+{
+       struct hostif_mic_failure_request_t *pp;
+
+       DPRINTK(3, "count=%d :: timer=%d\n", failure_count, timer);
+       /* make primitive */
+       pp = (struct hostif_mic_failure_request_t *)
+           kmalloc(hif_align_size(sizeof(*pp)), KS_WLAN_MEM_FLAG);
+       if (pp == NULL) {
+               DPRINTK(3, "allocate memory failed..\n");
+               return;
+       }
+       pp->header.size =
+           cpu_to_le16((uint16_t) (sizeof(*pp) - sizeof(pp->header.size)));
+       pp->header.event = cpu_to_le16((uint16_t) HIF_MIC_FAILURE_REQ);
+       pp->failure_count = cpu_to_le16((uint16_t) failure_count);
+       pp->timer = cpu_to_le16((uint16_t) timer);
+
+       /* send to device request */
+       ps_confirm_wait_inc(priv);
+       ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp)), NULL, NULL, NULL);
+}
+
/* Device I/O receive indicate: stash a received frame for the char-device
 * reader. Frames are kept in a DEVICE_STOCK_COUNT-slot ring; when it is
 * full, old slots are silently overwritten (event_count stops growing).
 */
static void devio_rec_ind(struct ks_wlan_private *priv, unsigned char *p,
			  unsigned int size)
{
	/* only buffer frames while the debug char device is open */
	if (priv->device_open_status) {
		spin_lock(&priv->dev_read_lock);	/* request spin lock */
		priv->dev_data[atomic_read(&priv->rec_count)] = p;
		priv->dev_size[atomic_read(&priv->rec_count)] = size;

		if (atomic_read(&priv->event_count) != DEVICE_STOCK_COUNT) {
			/* rx event count inc */
			atomic_inc(&priv->event_count);
		}
		/* advance the write index, wrapping at DEVICE_STOCK_COUNT */
		atomic_inc(&priv->rec_count);
		if (atomic_read(&priv->rec_count) == DEVICE_STOCK_COUNT)
			atomic_set(&priv->rec_count, 0);

		/* wake any readers blocked on the char device */
		wake_up_interruptible_all(&priv->devread_wait);

		/* release spin lock */
		spin_unlock(&priv->dev_read_lock);
	}
}
+
/* Entry point for a frame received from the hardware: mirror it to the
 * debug char device, then dispatch it if the embedded length matches.
 */
void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
		    unsigned int size)
{
	DPRINTK(4, "\n");

	devio_rec_ind(priv, p, size);

	/* set up the rx cursor that get_WORD()/hostif_event_check() consume */
	priv->rxp = p;
	priv->rx_size = size;

	if (get_WORD(priv) == priv->rx_size) {	/* length check !! */
		hostif_event_check(priv);	/* event check */
	}
}
+
+/* Push one WEP-related MIB setting to the firmware: the default key
+ * index, one of the four WEP key slots, or the privacy-invoked flag.
+ * Key material is only sent while WPA is disabled
+ * (!priv->wpa.wpa_enabled). */
+static
+void hostif_sme_set_wep(struct ks_wlan_private *priv, int type)
+{
+       uint32_t val;
+       switch (type) {
+       case SME_WEP_INDEX_REQUEST:
+               val = cpu_to_le32((uint32_t) (priv->reg.wep_index));
+               hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_ID,
+                                      sizeof(val), MIB_VALUE_TYPE_INT, &val);
+               break;
+       case SME_WEP_KEY1_REQUEST:
+               if (!priv->wpa.wpa_enabled)
+                       hostif_mib_set_request(priv,
+                                              DOT11_WEP_DEFAULT_KEY_VALUE1,
+                                              priv->reg.wep_key[0].size,
+                                              MIB_VALUE_TYPE_OSTRING,
+                                              &priv->reg.wep_key[0].val[0]);
+               break;
+       case SME_WEP_KEY2_REQUEST:
+               if (!priv->wpa.wpa_enabled)
+                       hostif_mib_set_request(priv,
+                                              DOT11_WEP_DEFAULT_KEY_VALUE2,
+                                              priv->reg.wep_key[1].size,
+                                              MIB_VALUE_TYPE_OSTRING,
+                                              &priv->reg.wep_key[1].val[0]);
+               break;
+       case SME_WEP_KEY3_REQUEST:
+               if (!priv->wpa.wpa_enabled)
+                       hostif_mib_set_request(priv,
+                                              DOT11_WEP_DEFAULT_KEY_VALUE3,
+                                              priv->reg.wep_key[2].size,
+                                              MIB_VALUE_TYPE_OSTRING,
+                                              &priv->reg.wep_key[2].val[0]);
+               break;
+       case SME_WEP_KEY4_REQUEST:
+               if (!priv->wpa.wpa_enabled)
+                       hostif_mib_set_request(priv,
+                                              DOT11_WEP_DEFAULT_KEY_VALUE4,
+                                              priv->reg.wep_key[3].size,
+                                              MIB_VALUE_TYPE_OSTRING,
+                                              &priv->reg.wep_key[3].val[0]);
+               break;
+       case SME_WEP_FLAG_REQUEST:
+               val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
+               hostif_mib_set_request(priv, DOT11_PRIVACY_INVOKED,
+                                      sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+               break;
+       }
+
+       return;
+}
+
+/* Wire format of a cipher/AKM suite list sent in RSN MIB set requests:
+ * a 16-bit count (little-endian on the wire) followed by up to four
+ * 4-byte suite selector IDs. */
+struct wpa_suite_t {
+       unsigned short size;
+       unsigned char suite[4][CIPHER_ID_LEN];
+} __attribute__ ((packed));
+
+/* Wire format of the LOCAL_RSN_MODE MIB value: mode word plus a
+ * capability field (always sent as 0 below). */
+struct rsn_mode_t {
+       uint32_t rsn_mode;
+       uint16_t rsn_capability;
+} __attribute__ ((packed));
+
+/* Translate a SME_RSN_*_REQUEST event into the matching RSN MIB set
+ * request: unicast cipher suite, multicast cipher suite, AKM (auth)
+ * suite, the RSN-enabled flag, or the WPA/WPA2 mode word.  The suite
+ * selector chosen depends on priv->wpa.version (WPA vs. WPA2).
+ *
+ * NOTE(review): wpa_suite.size is converted with cpu_to_le16() before
+ * it is used in the "CIPHER_ID_LEN * wpa_suite.size" /
+ * "KEY_MGMT_ID_LEN * wpa_suite.size" length arithmetic below; on a
+ * big-endian host that length would be wrong.  Harmless on
+ * little-endian targets -- confirm before reuse. */
+static
+void hostif_sme_set_rsn(struct ks_wlan_private *priv, int type)
+{
+       struct wpa_suite_t wpa_suite;
+       struct rsn_mode_t rsn_mode;
+       uint32_t val;
+
+       memset(&wpa_suite, 0, sizeof(wpa_suite));
+
+       switch (type) {
+       case SME_RSN_UCAST_REQUEST:
+               /* single pairwise (unicast) cipher suite */
+               wpa_suite.size = cpu_to_le16((uint16_t) 1);
+               switch (priv->wpa.pairwise_suite) {
+               case IW_AUTH_CIPHER_NONE:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA2_NONE, CIPHER_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA_NONE, CIPHER_ID_LEN);
+                       break;
+               case IW_AUTH_CIPHER_WEP40:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA2_WEP40, CIPHER_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA_WEP40, CIPHER_ID_LEN);
+                       break;
+               case IW_AUTH_CIPHER_TKIP:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA2_TKIP, CIPHER_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA_TKIP, CIPHER_ID_LEN);
+                       break;
+               case IW_AUTH_CIPHER_CCMP:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA2_CCMP, CIPHER_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA_CCMP, CIPHER_ID_LEN);
+                       break;
+               case IW_AUTH_CIPHER_WEP104:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA2_WEP104, CIPHER_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA_WEP104, CIPHER_ID_LEN);
+                       break;
+               }
+
+               hostif_mib_set_request(priv, DOT11_RSN_CONFIG_UNICAST_CIPHER,
+                                      sizeof(wpa_suite.size) +
+                                      CIPHER_ID_LEN * wpa_suite.size,
+                                      MIB_VALUE_TYPE_OSTRING, &wpa_suite);
+               break;
+       case SME_RSN_MCAST_REQUEST:
+               /* group (multicast) cipher: only the 4-byte selector is
+                * sent, no count prefix */
+               switch (priv->wpa.group_suite) {
+               case IW_AUTH_CIPHER_NONE:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA2_NONE, CIPHER_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA_NONE, CIPHER_ID_LEN);
+                       break;
+               case IW_AUTH_CIPHER_WEP40:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA2_WEP40, CIPHER_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA_WEP40, CIPHER_ID_LEN);
+                       break;
+               case IW_AUTH_CIPHER_TKIP:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA2_TKIP, CIPHER_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA_TKIP, CIPHER_ID_LEN);
+                       break;
+               case IW_AUTH_CIPHER_CCMP:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA2_CCMP, CIPHER_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA_CCMP, CIPHER_ID_LEN);
+                       break;
+               case IW_AUTH_CIPHER_WEP104:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA2_WEP104, CIPHER_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      CIPHER_ID_WPA_WEP104, CIPHER_ID_LEN);
+                       break;
+               }
+
+               hostif_mib_set_request(priv, DOT11_RSN_CONFIG_MULTICAST_CIPHER,
+                                      CIPHER_ID_LEN, MIB_VALUE_TYPE_OSTRING,
+                                      &wpa_suite.suite[0][0]);
+               break;
+       case SME_RSN_AUTH_REQUEST:
+               /* single AKM (key management) suite */
+               wpa_suite.size = cpu_to_le16((uint16_t) 1);
+               switch (priv->wpa.key_mgmt_suite) {
+               case IW_AUTH_KEY_MGMT_802_1X:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      KEY_MGMT_ID_WPA2_1X, KEY_MGMT_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      KEY_MGMT_ID_WPA_1X, KEY_MGMT_ID_LEN);
+                       break;
+               case IW_AUTH_KEY_MGMT_PSK:
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      KEY_MGMT_ID_WPA2_PSK, KEY_MGMT_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      KEY_MGMT_ID_WPA_PSK, KEY_MGMT_ID_LEN);
+                       break;
+               case 0: /* no key management configured */
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      KEY_MGMT_ID_WPA2_NONE, KEY_MGMT_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      KEY_MGMT_ID_WPA_NONE, KEY_MGMT_ID_LEN);
+                       break;
+               case 4: /* WPA-NONE (ad-hoc) -- magic value, no IW_AUTH_* name */
+                       if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2)
+                               memcpy(&wpa_suite.suite[0][0],
+                                      KEY_MGMT_ID_WPA2_WPANONE,
+                                      KEY_MGMT_ID_LEN);
+                       else
+                               memcpy(&wpa_suite.suite[0][0],
+                                      KEY_MGMT_ID_WPA_WPANONE,
+                                      KEY_MGMT_ID_LEN);
+                       break;
+               }
+
+               hostif_mib_set_request(priv, DOT11_RSN_CONFIG_AUTH_SUITE,
+                                      sizeof(wpa_suite.size) +
+                                      KEY_MGMT_ID_LEN * wpa_suite.size,
+                                      MIB_VALUE_TYPE_OSTRING, &wpa_suite);
+               break;
+       case SME_RSN_ENABLED_REQUEST:
+               val = cpu_to_le32((uint32_t) (priv->wpa.rsn_enabled));
+               hostif_mib_set_request(priv, DOT11_RSN_ENABLED,
+                                      sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+               break;
+       case SME_RSN_MODE_REQUEST:
+               if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA2) {
+                       rsn_mode.rsn_mode =
+                           cpu_to_le32((uint32_t) RSN_MODE_WPA2);
+                       rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+               } else if (priv->wpa.version == IW_AUTH_WPA_VERSION_WPA) {
+                       rsn_mode.rsn_mode =
+                           cpu_to_le32((uint32_t) RSN_MODE_WPA);
+                       rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+               } else {
+                       rsn_mode.rsn_mode =
+                           cpu_to_le32((uint32_t) RSN_MODE_NONE);
+                       rsn_mode.rsn_capability = cpu_to_le16((uint16_t) 0);
+               }
+               hostif_mib_set_request(priv, LOCAL_RSN_MODE, sizeof(rsn_mode),
+                                      MIB_VALUE_TYPE_OSTRING, &rsn_mode);
+               break;
+
+       }
+       return;
+}
+
+/* Build the active rate set -- expanding the full-auto default for the
+ * configured PHY type, then masking out rates the PHY cannot use --
+ * and issue the start request matching the configured operation mode
+ * (pseudo ad-hoc, infrastructure, or IEEE 802.11 ad-hoc).  The *set2
+ * request variants are used when a specific BSSID is configured. */
+static
+void hostif_sme_mode_setup(struct ks_wlan_private *priv)
+{
+       unsigned char rate_size;
+       unsigned char rate_octet[RATE_SET_MAX_SIZE];
+       int i = 0;
+
+       /* rate setting if rate segging is auto for changing phy_type (#94) */
+       if (priv->reg.tx_rate == TX_RATE_FULL_AUTO) {
+               if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+                       priv->reg.rate_set.body[3] = TX_RATE_11M;
+                       priv->reg.rate_set.body[2] = TX_RATE_5M;
+                       priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
+                       priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
+                       priv->reg.rate_set.size = 4;
+               } else {        /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+                       priv->reg.rate_set.body[11] = TX_RATE_54M;
+                       priv->reg.rate_set.body[10] = TX_RATE_48M;
+                       priv->reg.rate_set.body[9] = TX_RATE_36M;
+                       priv->reg.rate_set.body[8] = TX_RATE_18M;
+                       priv->reg.rate_set.body[7] = TX_RATE_9M;
+                       priv->reg.rate_set.body[6] = TX_RATE_24M | BASIC_RATE;
+                       priv->reg.rate_set.body[5] = TX_RATE_12M | BASIC_RATE;
+                       priv->reg.rate_set.body[4] = TX_RATE_6M | BASIC_RATE;
+                       priv->reg.rate_set.body[3] = TX_RATE_11M | BASIC_RATE;
+                       priv->reg.rate_set.body[2] = TX_RATE_5M | BASIC_RATE;
+                       priv->reg.rate_set.body[1] = TX_RATE_2M | BASIC_RATE;
+                       priv->reg.rate_set.body[0] = TX_RATE_1M | BASIC_RATE;
+                       priv->reg.rate_set.size = 12;
+               }
+       }
+
+       /* rate mask by phy setting */
+       if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+               /* keep only 11b rates; copying stops at the first
+                * non-11b entry */
+               for (i = 0; i < priv->reg.rate_set.size; i++) {
+                       if (IS_11B_RATE(priv->reg.rate_set.body[i])) {
+                               if ((priv->reg.rate_set.body[i] & RATE_MASK) >=
+                                   TX_RATE_5M)
+                                       rate_octet[i] =
+                                           priv->reg.rate_set.
+                                           body[i] & RATE_MASK;
+                               else
+                                       rate_octet[i] =
+                                           priv->reg.rate_set.body[i];
+                       } else
+                               break;
+               }
+
+       } else {        /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+               for (i = 0; i < priv->reg.rate_set.size; i++) {
+                       if (IS_11BG_RATE(priv->reg.rate_set.body[i])) {
+                               if (IS_OFDM_EXT_RATE
+                                   (priv->reg.rate_set.body[i]))
+                                       rate_octet[i] =
+                                           priv->reg.rate_set.
+                                           body[i] & RATE_MASK;
+                               else
+                                       rate_octet[i] =
+                                           priv->reg.rate_set.body[i];
+                       } else
+                               break;
+               }
+       }
+       rate_size = i;
+       /* fall back to a single sane basic rate if nothing survived */
+       if (rate_size == 0) {
+               if (priv->reg.phy_type == D_11G_ONLY_MODE)
+                       rate_octet[0] = TX_RATE_6M | BASIC_RATE;
+               else
+                       rate_octet[0] = TX_RATE_2M | BASIC_RATE;
+               rate_size = 1;
+       }
+
+       /* rate set update */
+       priv->reg.rate_set.size = rate_size;
+       memcpy(&priv->reg.rate_set.body[0], &rate_octet[0], rate_size);
+
+       switch (priv->reg.operation_mode) {
+       case MODE_PSEUDO_ADHOC:
+               /* Pseudo Ad-Hoc mode */
+               hostif_ps_adhoc_set_request(priv);
+               break;
+       case MODE_INFRASTRUCTURE:
+               /* Infrastructure mode */
+               if (!is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+                       hostif_infrastructure_set_request(priv);
+               } else {
+                       hostif_infrastructure_set2_request(priv);
+                       DPRINTK(2,
+                               "Infra bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
+                               priv->reg.bssid[0], priv->reg.bssid[1],
+                               priv->reg.bssid[2], priv->reg.bssid[3],
+                               priv->reg.bssid[4], priv->reg.bssid[5]);
+               }
+               break;
+       case MODE_ADHOC:
+               /* IEEE802.11 Ad-Hoc mode */
+               if (!is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+                       hostif_adhoc_set_request(priv);
+               } else {
+                       hostif_adhoc_set2_request(priv);
+                       DPRINTK(2,
+                               "Adhoc bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
+                               priv->reg.bssid[0], priv->reg.bssid[1],
+                               priv->reg.bssid[2], priv->reg.bssid[3],
+                               priv->reg.bssid[4], priv->reg.bssid[5]);
+               }
+               break;
+       default:
+               break;
+       }
+
+       return;
+}
+
+/* Program the firmware multicast filter from net_device state:
+ * promiscuous, all-multicast (also used when the list exceeds
+ * NIC_MAX_MCAST_LIST), or an explicit address list.  The explicit-list
+ * path is two-phased via the SME_MULTICAST flag: first pass sends the
+ * address list, next invocation sends the filter mode.
+ *
+ * NOTE(review): filter_type is "unsigned long" but is filled with
+ * cpu_to_le32() and sent with sizeof(filter_type); on a 64-bit host
+ * that sends 8 bytes for what looks like a 32-bit MIB value --
+ * verify against the firmware interface. */
+static
+void hostif_sme_multicast_set(struct ks_wlan_private *priv)
+{
+
+       struct net_device *dev = priv->net_dev;
+       int mc_count;
+       struct netdev_hw_addr *ha;
+       char set_address[NIC_MAX_MCAST_LIST * ETH_ALEN];
+       unsigned long filter_type;
+       int i = 0;
+
+       DPRINTK(3, "\n");
+
+       spin_lock(&priv->multicast_spin);
+
+       memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
+
+       if (dev->flags & IFF_PROMISC) {
+               filter_type = cpu_to_le32((uint32_t) MCAST_FILTER_PROMISC);
+               hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
+                                      sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
+                                      &filter_type);
+       } else if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST)
+                  || (dev->flags & IFF_ALLMULTI)) {
+               filter_type = cpu_to_le32((uint32_t) MCAST_FILTER_MCASTALL);
+               hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
+                                      sizeof(filter_type), MIB_VALUE_TYPE_BOOL,
+                                      &filter_type);
+       } else {
+               if (priv->sme_i.sme_flag & SME_MULTICAST) {
+                       /* phase 1: send the explicit address list */
+                       mc_count = netdev_mc_count(dev);
+                       netdev_for_each_mc_addr(ha, dev) {
+                               memcpy(&set_address[i * ETH_ALEN], ha->addr,
+                                      ETH_ALEN);
+                               i++;
+                       }
+                       priv->sme_i.sme_flag &= ~SME_MULTICAST;
+                       hostif_mib_set_request(priv, LOCAL_MULTICAST_ADDRESS,
+                                              (ETH_ALEN * mc_count),
+                                              MIB_VALUE_TYPE_OSTRING,
+                                              &set_address[0]);
+               } else {
+                       /* phase 2: switch the filter to list mode */
+                       filter_type =
+                           cpu_to_le32((uint32_t) MCAST_FILTER_MCAST);
+                       priv->sme_i.sme_flag |= SME_MULTICAST;
+                       hostif_mib_set_request(priv, LOCAL_MULTICAST_FILTER,
+                                              sizeof(filter_type),
+                                              MIB_VALUE_TYPE_BOOL,
+                                              &filter_type);
+               }
+       }
+
+       spin_unlock(&priv->multicast_spin);
+
+}
+
+/* Map the configured power-management level onto a firmware power
+ * management request.  Power save is only honoured in infrastructure
+ * mode (otherwise the device stays active); SAVE2 additionally asks
+ * to receive every DTIM beacon. */
+static
+void hostif_sme_powermgt_set(struct ks_wlan_private *priv)
+{
+       unsigned long mode, wake_up, receiveDTIMs;
+
+       DPRINTK(3, "\n");
+       switch (priv->reg.powermgt) {
+       case POWMGT_ACTIVE_MODE:
+               mode = POWER_ACTIVE;
+               wake_up = 0;
+               receiveDTIMs = 0;
+               break;
+       case POWMGT_SAVE1_MODE:
+               if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+                       mode = POWER_SAVE;
+                       wake_up = 0;
+                       receiveDTIMs = 0;
+               } else {
+                       mode = POWER_ACTIVE;
+                       wake_up = 0;
+                       receiveDTIMs = 0;
+               }
+               break;
+       case POWMGT_SAVE2_MODE:
+               if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+                       mode = POWER_SAVE;
+                       wake_up = 0;
+                       receiveDTIMs = 1;
+               } else {
+                       mode = POWER_ACTIVE;
+                       wake_up = 0;
+                       receiveDTIMs = 0;
+               }
+               break;
+       default:
+               /* unknown setting: fail safe to fully active */
+               mode = POWER_ACTIVE;
+               wake_up = 0;
+               receiveDTIMs = 0;
+               break;
+       }
+       hostif_power_mngmt_request(priv, mode, wake_up, receiveDTIMs);
+
+       return;
+}
+
+/* Forward the current sleep mode to the firmware; only SLP_SLEEP and
+ * SLP_ACTIVE are passed through, anything else is ignored. */
+static
+void hostif_sme_sleep_set(struct ks_wlan_private *priv)
+{
+       DPRINTK(3, "\n");
+       switch (priv->sleep_mode) {
+       case SLP_SLEEP:
+               hostif_sleep_request(priv, priv->sleep_mode);
+               break;
+       case SLP_ACTIVE:
+               hostif_sleep_request(priv, priv->sleep_mode);
+               break;
+       default:
+               break;
+       }
+
+       return;
+}
+
+/* Push one WPA key-related MIB setting to the firmware: the privacy
+ * flag, the transmit key index, one of the four key slots, or a TSC
+ * (receive sequence counter) used for PMK/GMK replay protection. */
+static
+void hostif_sme_set_key(struct ks_wlan_private *priv, int type)
+{
+       uint32_t val;
+       switch (type) {
+       case SME_SET_FLAG:
+               val = cpu_to_le32((uint32_t) (priv->reg.privacy_invoked));
+               hostif_mib_set_request(priv, DOT11_PRIVACY_INVOKED,
+                                      sizeof(val), MIB_VALUE_TYPE_BOOL, &val);
+               break;
+       case SME_SET_TXKEY:
+               val = cpu_to_le32((uint32_t) (priv->wpa.txkey));
+               hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_ID,
+                                      sizeof(val), MIB_VALUE_TYPE_INT, &val);
+               break;
+       case SME_SET_KEY1:
+               hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE1,
+                                      priv->wpa.key[0].key_len,
+                                      MIB_VALUE_TYPE_OSTRING,
+                                      &priv->wpa.key[0].key_val[0]);
+               break;
+       case SME_SET_KEY2:
+               hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE2,
+                                      priv->wpa.key[1].key_len,
+                                      MIB_VALUE_TYPE_OSTRING,
+                                      &priv->wpa.key[1].key_val[0]);
+               break;
+       case SME_SET_KEY3:
+               hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE3,
+                                      priv->wpa.key[2].key_len,
+                                      MIB_VALUE_TYPE_OSTRING,
+                                      &priv->wpa.key[2].key_val[0]);
+               break;
+       case SME_SET_KEY4:
+               hostif_mib_set_request(priv, DOT11_WEP_DEFAULT_KEY_VALUE4,
+                                      priv->wpa.key[3].key_len,
+                                      MIB_VALUE_TYPE_OSTRING,
+                                      &priv->wpa.key[3].key_val[0]);
+               break;
+       case SME_SET_PMK_TSC:
+               hostif_mib_set_request(priv, DOT11_PMK_TSC,
+                                      WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
+                                      &priv->wpa.key[0].rx_seq[0]);
+               break;
+       case SME_SET_GMK1_TSC:
+               hostif_mib_set_request(priv, DOT11_GMK1_TSC,
+                                      WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
+                                      &priv->wpa.key[1].rx_seq[0]);
+               break;
+       case SME_SET_GMK2_TSC:
+               hostif_mib_set_request(priv, DOT11_GMK2_TSC,
+                                      WPA_RX_SEQ_LEN, MIB_VALUE_TYPE_OSTRING,
+                                      &priv->wpa.key[2].rx_seq[0]);
+               break;
+       }
+       return;
+}
+
+/* Serialize the driver's PMKSA cache (up to PMK_LIST_MAX entries of
+ * BSSID + PMKID) into the packed on-wire layout and hand it to the
+ * firmware via the LOCAL_PMK MIB. */
+static
+void hostif_sme_set_pmksa(struct ks_wlan_private *priv)
+{
+       /* on-wire layout: 16-bit entry count, then BSSID/PMKID pairs */
+       struct pmk_cache_t {
+               uint16_t size;
+               struct {
+                       uint8_t bssid[ETH_ALEN];
+                       uint8_t pmkid[IW_PMKID_LEN];
+               } __attribute__ ((packed)) list[PMK_LIST_MAX];
+       } __attribute__ ((packed)) pmkcache;
+       struct pmk_t *pmk;
+       struct list_head *ptr;
+       int i;
+
+       DPRINTK(4, "pmklist.size=%d\n", priv->pmklist.size);
+       i = 0;
+       list_for_each(ptr, &priv->pmklist.head) {
+               pmk = list_entry(ptr, struct pmk_t, list);
+               if (i < PMK_LIST_MAX) { /* silently drop overflow entries */
+                       memcpy(pmkcache.list[i].bssid, pmk->bssid, ETH_ALEN);
+                       memcpy(pmkcache.list[i].pmkid, pmk->pmkid,
+                              IW_PMKID_LEN);
+                       i++;
+               }
+       }
+       pmkcache.size = cpu_to_le16((uint16_t) (priv->pmklist.size));
+       hostif_mib_set_request(priv, LOCAL_PMK,
+                              sizeof(priv->pmklist.size) + (ETH_ALEN +
+                                                            IW_PMKID_LEN) *
+                              (priv->pmklist.size), MIB_VALUE_TYPE_OSTRING,
+                              &pmkcache);
+}
+
+/* execute sme */
+/* Central SME event dispatcher: runs one queued event by issuing the
+ * matching firmware request or MIB access.  Request events fan out to
+ * the hostif_sme_set_* helpers; most *_CONFIRM events are pure
+ * acknowledgements and need no action. */
+static
+void hostif_sme_execute(struct ks_wlan_private *priv, int event)
+{
+       uint32_t val;
+
+       DPRINTK(3, "event=%d\n", event);
+       switch (event) {
+       case SME_START:
+               /* boot sequence step 1: fetch the MAC address */
+               if (priv->dev_state == DEVICE_STATE_BOOT) {
+                       hostif_mib_get_request(priv, DOT11_MAC_ADDRESS);
+               }
+               break;
+       case SME_MULTICAST_REQUEST:
+               hostif_sme_multicast_set(priv);
+               break;
+       case SME_MACADDRESS_SET_REQUEST:
+               hostif_mib_set_request(priv, LOCAL_CURRENTADDRESS, ETH_ALEN,
+                                      MIB_VALUE_TYPE_OSTRING,
+                                      &priv->eth_addr[0]);
+               break;
+       case SME_BSS_SCAN_REQUEST:
+               hostif_bss_scan_request(priv, priv->reg.scan_type,
+                                       priv->scan_ssid, priv->scan_ssid_len);
+               break;
+       case SME_POW_MNGMT_REQUEST:
+               hostif_sme_powermgt_set(priv);
+               break;
+       case SME_PHY_INFO_REQUEST:
+               hostif_phy_information_request(priv);
+               break;
+       case SME_MIC_FAILURE_REQUEST:
+               /* failure counts other than 1 or 2 are unexpected */
+               if (priv->wpa.mic_failure.failure == 1) {
+                       hostif_mic_failure_request(priv,
+                                                  priv->wpa.mic_failure.
+                                                  failure - 1, 0);
+               } else if (priv->wpa.mic_failure.failure == 2) {
+                       hostif_mic_failure_request(priv,
+                                                  priv->wpa.mic_failure.
+                                                  failure - 1,
+                                                  priv->wpa.mic_failure.
+                                                  counter);
+               } else
+                       DPRINTK(4,
+                               "SME_MIC_FAILURE_REQUEST: failure count=%u error?\n",
+                               priv->wpa.mic_failure.failure);
+               break;
+       case SME_MIC_FAILURE_CONFIRM:
+               if (priv->wpa.mic_failure.failure == 2) {
+                       if (priv->wpa.mic_failure.stop)
+                               priv->wpa.mic_failure.stop = 0;
+                       priv->wpa.mic_failure.failure = 0;
+                       hostif_start_request(priv, priv->reg.operation_mode);
+               }
+               break;
+       case SME_GET_MAC_ADDRESS:
+               /* boot sequence step 2: fetch the product version */
+               if (priv->dev_state == DEVICE_STATE_BOOT) {
+                       hostif_mib_get_request(priv, DOT11_PRODUCT_VERSION);
+               }
+               break;
+       case SME_GET_PRODUCT_VERSION:
+               /* boot sequence step 3: advance to pre-init state */
+               if (priv->dev_state == DEVICE_STATE_BOOT) {
+                       priv->dev_state = DEVICE_STATE_PREINIT;
+               }
+               break;
+       case SME_STOP_REQUEST:
+               hostif_stop_request(priv);
+               break;
+       case SME_RTS_THRESHOLD_REQUEST:
+               val = cpu_to_le32((uint32_t) (priv->reg.rts));
+               hostif_mib_set_request(priv, DOT11_RTS_THRESHOLD,
+                                      sizeof(val), MIB_VALUE_TYPE_INT, &val);
+               break;
+       case SME_FRAGMENTATION_THRESHOLD_REQUEST:
+               val = cpu_to_le32((uint32_t) (priv->reg.fragment));
+               hostif_mib_set_request(priv, DOT11_FRAGMENTATION_THRESHOLD,
+                                      sizeof(val), MIB_VALUE_TYPE_INT, &val);
+               break;
+       case SME_WEP_INDEX_REQUEST:
+       case SME_WEP_KEY1_REQUEST:
+       case SME_WEP_KEY2_REQUEST:
+       case SME_WEP_KEY3_REQUEST:
+       case SME_WEP_KEY4_REQUEST:
+       case SME_WEP_FLAG_REQUEST:
+               hostif_sme_set_wep(priv, event);
+               break;
+       case SME_RSN_UCAST_REQUEST:
+       case SME_RSN_MCAST_REQUEST:
+       case SME_RSN_AUTH_REQUEST:
+       case SME_RSN_ENABLED_REQUEST:
+       case SME_RSN_MODE_REQUEST:
+               hostif_sme_set_rsn(priv, event);
+               break;
+       case SME_SET_FLAG:
+       case SME_SET_TXKEY:
+       case SME_SET_KEY1:
+       case SME_SET_KEY2:
+       case SME_SET_KEY3:
+       case SME_SET_KEY4:
+       case SME_SET_PMK_TSC:
+       case SME_SET_GMK1_TSC:
+       case SME_SET_GMK2_TSC:
+               hostif_sme_set_key(priv, event);
+               break;
+       case SME_SET_PMKSA:
+               hostif_sme_set_pmksa(priv);
+               break;
+#ifdef WPS
+       case SME_WPS_ENABLE_REQUEST:
+               hostif_mib_set_request(priv, LOCAL_WPS_ENABLE,
+                                      sizeof(priv->wps.wps_enabled),
+                                      MIB_VALUE_TYPE_INT,
+                                      &priv->wps.wps_enabled);
+               break;
+       case SME_WPS_PROBE_REQUEST:
+               hostif_mib_set_request(priv, LOCAL_WPS_PROBE_REQ,
+                                      priv->wps.ielen,
+                                      MIB_VALUE_TYPE_OSTRING, priv->wps.ie);
+               break;
+#endif /* WPS */
+       case SME_MODE_SET_REQUEST:
+               hostif_sme_mode_setup(priv);
+               break;
+       case SME_SET_GAIN:
+               hostif_mib_set_request(priv, LOCAL_GAIN,
+                                      sizeof(priv->gain),
+                                      MIB_VALUE_TYPE_OSTRING, &priv->gain);
+               break;
+       case SME_GET_GAIN:
+               hostif_mib_get_request(priv, LOCAL_GAIN);
+               break;
+       case SME_GET_EEPROM_CKSUM:
+               priv->eeprom_checksum = EEPROM_FW_NOT_SUPPORT;  /* initialize */
+               hostif_mib_get_request(priv, LOCAL_EEPROM_SUM);
+               break;
+       case SME_START_REQUEST:
+               hostif_start_request(priv, priv->reg.operation_mode);
+               break;
+       case SME_START_CONFIRM:
+               /* for power save */
+               atomic_set(&priv->psstatus.snooze_guard, 0);
+               atomic_set(&priv->psstatus.confirm_wait, 0);
+               if (priv->dev_state == DEVICE_STATE_PREINIT) {
+                       priv->dev_state = DEVICE_STATE_INIT;
+               }
+               /* wake_up_interruptible_all(&priv->confirm_wait); */
+               complete(&priv->confirm_wait);
+               break;
+       case SME_SLEEP_REQUEST:
+               hostif_sme_sleep_set(priv);
+               break;
+       case SME_SET_REGION:
+               val = cpu_to_le32((uint32_t) (priv->region));
+               hostif_mib_set_request(priv, LOCAL_REGION,
+                                      sizeof(val), MIB_VALUE_TYPE_INT, &val);
+               break;
+       /* remaining confirms are acknowledgements only: no action */
+       case SME_MULTICAST_CONFIRM:
+       case SME_BSS_SCAN_CONFIRM:
+       case SME_POW_MNGMT_CONFIRM:
+       case SME_PHY_INFO_CONFIRM:
+       case SME_STOP_CONFIRM:
+       case SME_RTS_THRESHOLD_CONFIRM:
+       case SME_FRAGMENTATION_THRESHOLD_CONFIRM:
+       case SME_WEP_INDEX_CONFIRM:
+       case SME_WEP_KEY1_CONFIRM:
+       case SME_WEP_KEY2_CONFIRM:
+       case SME_WEP_KEY3_CONFIRM:
+       case SME_WEP_KEY4_CONFIRM:
+       case SME_WEP_FLAG_CONFIRM:
+       case SME_RSN_UCAST_CONFIRM:
+       case SME_RSN_MCAST_CONFIRM:
+       case SME_RSN_AUTH_CONFIRM:
+       case SME_RSN_ENABLED_CONFIRM:
+       case SME_RSN_MODE_CONFIRM:
+       case SME_MODE_SET_CONFIRM:
+               break;
+       case SME_TERMINATE:
+       default:
+               break;
+       }
+}
+
+/* Tasklet body for the SME event queue: execute the event at the
+ * queue head and reschedule itself while more events remain.  The
+ * "unsigned long dev" argument is the tasklet data cookie carrying
+ * the ks_wlan_private pointer (see tasklet_init in hostif_init). */
+static
+void hostif_sme_task(unsigned long dev)
+{
+       struct ks_wlan_private *priv = (struct ks_wlan_private *)dev;
+
+       DPRINTK(3, "\n");
+
+       if (priv->dev_state >= DEVICE_STATE_BOOT) {
+               if (0 < cnt_smeqbody(priv)
+                   && priv->dev_state >= DEVICE_STATE_BOOT) {
+                       hostif_sme_execute(priv,
+                                          priv->sme_i.event_buff[priv->sme_i.
+                                                                 qhead]);
+                       inc_smeqhead(priv);
+                       /* keep draining: one event per tasklet run */
+                       if (0 < cnt_smeqbody(priv))
+                               tasklet_schedule(&priv->sme_task);
+               }
+       }
+       return;
+}
+
+/* send to Station Management Entity module */
+/* Enqueue one SME event for hostif_sme_task() and kick the tasklet.
+ * Events arriving on a full queue are dropped with a log message. */
+void hostif_sme_enqueue(struct ks_wlan_private *priv, unsigned short event)
+{
+       DPRINTK(3, "\n");
+
+       /* enqueue sme event */
+       if (cnt_smeqbody(priv) < (SME_EVENT_BUFF_SIZE - 1)) {
+               priv->sme_i.event_buff[priv->sme_i.qtail] = event;
+               inc_smeqtail(priv);
+               //DPRINTK(3,"inc_smeqtail \n");
+#ifdef KS_WLAN_DEBUG
+               /* track high-water mark for debugging */
+               if (priv->sme_i.max_event_count < cnt_smeqbody(priv))
+                       priv->sme_i.max_event_count = cnt_smeqbody(priv);
+#endif /* KS_WLAN_DEBUG */
+       } else {
+               /* in case of buffer overflow */
+               //DPRINTK(2,"sme queue buffer overflow\n");
+               printk("sme queue buffer overflow\n");
+       }
+
+       tasklet_schedule(&priv->sme_task);
+
+}
+
+/* One-time initialisation of the host-interface state: AP list,
+ * device-read bookkeeping, power-save status, WPA/PMKSA lists and the
+ * SME event queue/tasklet.  Always returns 0 (kept for the standard
+ * init-function signature). */
+int hostif_init(struct ks_wlan_private *priv)
+{
+       int rc = 0;
+       int i;
+
+       DPRINTK(3, "\n");
+
+       priv->aplist.size = 0;
+       for (i = 0; i < LOCAL_APLIST_MAX; i++)
+               memset(&(priv->aplist.ap[i]), 0, sizeof(struct local_ap_t));
+       priv->infra_status = 0;
+       priv->current_rate = 4;
+       priv->connect_status = DISCONNECT_STATUS;
+
+       spin_lock_init(&priv->multicast_spin);
+
+       /* device-read (userspace /dev interface) bookkeeping */
+       spin_lock_init(&priv->dev_read_lock);
+       init_waitqueue_head(&priv->devread_wait);
+       priv->dev_count = 0;
+       atomic_set(&priv->event_count, 0);
+       atomic_set(&priv->rec_count, 0);
+
+       /* for power save */
+       atomic_set(&priv->psstatus.status, PS_NONE);
+       atomic_set(&priv->psstatus.confirm_wait, 0);
+       atomic_set(&priv->psstatus.snooze_guard, 0);
+       /* init_waitqueue_head(&priv->psstatus.wakeup_wait); */
+       init_completion(&priv->psstatus.wakeup_wait);
+       //INIT_WORK(&priv->ks_wlan_wakeup_task, ks_wlan_hw_wakeup_task, (void *)priv);
+       INIT_WORK(&priv->ks_wlan_wakeup_task, ks_wlan_hw_wakeup_task);
+
+       /* WPA */
+       memset(&(priv->wpa), 0, sizeof(priv->wpa));
+       priv->wpa.rsn_enabled = 0;
+       priv->wpa.mic_failure.failure = 0;
+       priv->wpa.mic_failure.last_failure_time = 0;
+       priv->wpa.mic_failure.stop = 0;
+       memset(&(priv->pmklist), 0, sizeof(priv->pmklist));
+       INIT_LIST_HEAD(&priv->pmklist.head);
+       for (i = 0; i < PMK_LIST_MAX; i++)
+               INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
+
+       /* SME event queue + dispatcher tasklet */
+       priv->sme_i.sme_status = SME_IDLE;
+       priv->sme_i.qhead = priv->sme_i.qtail = 0;
+#ifdef KS_WLAN_DEBUG
+       priv->sme_i.max_event_count = 0;
+#endif
+       spin_lock_init(&priv->sme_i.sme_spin);
+       priv->sme_i.sme_flag = 0;
+
+       tasklet_init(&priv->sme_task, hostif_sme_task, (unsigned long)priv);
+
+       return rc;
+}
+
+/* Tear down the host interface: stop the SME dispatcher tasklet. */
+void hostif_exit(struct ks_wlan_private *priv)
+{
+       tasklet_kill(&priv->sme_task);
+       return;
+}
diff --git a/drivers/staging/ks7010/ks_hostif.h b/drivers/staging/ks7010/ks_hostif.h
new file mode 100644 (file)
index 0000000..dc806b5
--- /dev/null
@@ -0,0 +1,644 @@
+/*
+ *   Driver for KeyStream wireless LAN
+ *   
+ *   Copyright (c) 2005-2008 KeyStream Corp.
+ *   Copyright (C) 2009 Renesas Technology Corp.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License version 2 as
+ *   published by the Free Software Foundation.
+ */
+
+#ifndef _KS_HOSTIF_H_
+#define _KS_HOSTIF_H_
+/*
+ * HOST-MAC I/F events
+ */
+#define HIF_DATA_REQ           0xE001
+#define HIF_DATA_IND           0xE801
+#define HIF_MIB_GET_REQ                0xE002
+#define HIF_MIB_GET_CONF       0xE802
+#define HIF_MIB_SET_REQ                0xE003
+#define HIF_MIB_SET_CONF       0xE803
+#define HIF_POWERMGT_REQ       0xE004
+#define HIF_POWERMGT_CONF      0xE804
+#define HIF_START_REQ          0xE005
+#define HIF_START_CONF         0xE805
+#define HIF_CONNECT_IND                0xE806
+#define HIF_STOP_REQ           0xE006
+#define HIF_STOP_CONF          0xE807
+#define HIF_PS_ADH_SET_REQ     0xE007
+#define HIF_PS_ADH_SET_CONF    0xE808
+#define HIF_INFRA_SET_REQ      0xE008
+#define HIF_INFRA_SET_CONF     0xE809
+#define HIF_ADH_SET_REQ                0xE009
+#define HIF_ADH_SET_CONF       0xE80A
+#define HIF_AP_SET_REQ         0xE00A
+#define HIF_AP_SET_CONF                0xE80B
+#define HIF_ASSOC_INFO_IND     0xE80C
+#define HIF_MIC_FAILURE_REQ    0xE00B
+#define HIF_MIC_FAILURE_CONF   0xE80D
+#define HIF_SCAN_REQ           0xE00C
+#define HIF_SCAN_CONF          0xE80E
+#define HIF_PHY_INFO_REQ       0xE00D
+#define HIF_PHY_INFO_CONF      0xE80F
+#define HIF_SLEEP_REQ          0xE00E
+#define HIF_SLEEP_CONF         0xE810
+#define HIF_PHY_INFO_IND       0xE811
+#define HIF_SCAN_IND           0xE812
+#define HIF_INFRA_SET2_REQ     0xE00F
+#define HIF_INFRA_SET2_CONF    0xE813
+#define HIF_ADH_SET2_REQ       0xE010
+#define HIF_ADH_SET2_CONF      0xE814
+
+#define HIF_REQ_MAX            0xE010
+
+/*
+ * HOST-MAC I/F data structure
+ * Byte alignment Little Endian
+ */
+
+/* Common header carried at the start of every HOST-MAC I/F message. */
+struct hostif_hdr {
+       uint16_t size;  /* message size */
+       uint16_t event; /* HIF_xxx event code (see list above) */
+} __attribute__ ((packed));
+
+/* Host -> MAC data frame (HIF_DATA_REQ); payload follows in data[].
+ * data[0] is an old-style flexible array member. */
+struct hostif_data_request_t {
+       struct hostif_hdr header;
+       uint16_t auth_type;
+#define TYPE_DATA 0x0000
+#define TYPE_AUTH 0x0001
+       uint16_t reserved;
+       uint8_t data[0];
+} __attribute__ ((packed));
+
+/* MAC -> host data frame (HIF_DATA_IND); auth_type distinguishes key material. */
+struct hostif_data_indication_t {
+       struct hostif_hdr header;
+       uint16_t auth_type;
+/* #define TYPE_DATA 0x0000 */
+#define TYPE_PMK1 0x0001
+#define TYPE_GMK1 0x0002
+#define TYPE_GMK2 0x0003
+       uint16_t reserved;
+       uint8_t data[0];
+} __attribute__ ((packed));
+
+/* Channel list: size gives the number of valid entries in body[]. */
+#define CHANNEL_LIST_MAX_SIZE 14
+struct channel_list_t {
+       uint8_t size;
+       uint8_t body[CHANNEL_LIST_MAX_SIZE];
+       uint8_t pad;
+} __attribute__ ((packed));
+
+/* MIB Attribute */
+#define DOT11_MAC_ADDRESS                 0x21010100   /* MAC Address (R) */
+#define DOT11_PRODUCT_VERSION             0x31024100   /* FirmWare Version (R) */
+#define DOT11_RTS_THRESHOLD               0x21020100   /* RTS Threshold (R/W) */
+#define DOT11_FRAGMENTATION_THRESHOLD     0x21050100   /* Fragment Threshold (R/W) */
+#define DOT11_PRIVACY_INVOKED             0x15010100   /* WEP ON/OFF (W) */
+#define DOT11_WEP_DEFAULT_KEY_ID          0x15020100   /* WEP Index (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE1      0x13020101   /* WEP Key#1(TKIP AES: PairwiseTemporalKey) (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE2      0x13020102   /* WEP Key#2(TKIP AES: GroupKey1) (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE3      0x13020103   /* WEP Key#3(TKIP AES: GroupKey2) (W) */
+#define DOT11_WEP_DEFAULT_KEY_VALUE4      0x13020104   /* WEP Key#4 (W) */
+#define DOT11_WEP_LIST                    0x13020100   /* WEP LIST */
+#define        DOT11_DESIRED_SSID                0x11090100    /* SSID */
+#define        DOT11_CURRENT_CHANNEL             0x45010100    /* channel set */
+#define        DOT11_OPERATION_RATE_SET          0x11110100    /* rate set */
+
+#define LOCAL_AP_SEARCH_INTEAVAL          0xF1010100   /* AP search interval (R/W) */
+#define LOCAL_CURRENTADDRESS              0xF1050100   /* MAC Adress change (W) */
+#define LOCAL_MULTICAST_ADDRESS           0xF1060100   /* Multicast Adress (W) */
+#define LOCAL_MULTICAST_FILTER            0xF1060200   /* Multicast Adress Filter enable/disable (W) */
+#define LOCAL_SEARCHED_AP_LIST            0xF1030100   /* AP list (R) */
+#define LOCAL_LINK_AP_STATUS              0xF1040100   /* Link AP status (R) */
+#define        LOCAL_PACKET_STATISTICS           0xF1020100    /* tx,rx packets statistics */
+#define LOCAL_AP_SCAN_LIST_TYPE_SET      0xF1030200    /* AP_SCAN_LIST_TYPE */
+
+#define DOT11_RSN_ENABLED                 0x15070100   /* WPA enable/disable (W) */
+#define LOCAL_RSN_MODE                    0x56010100   /* RSN mode WPA/WPA2 (W) */
+#define DOT11_RSN_CONFIG_MULTICAST_CIPHER 0x51040100   /* GroupKeyCipherSuite (W) */
+#define DOT11_RSN_CONFIG_UNICAST_CIPHER   0x52020100   /* PairwiseKeyCipherSuite (W) */
+#define DOT11_RSN_CONFIG_AUTH_SUITE       0x53020100   /* AuthenticationKeyManagementSuite (W) */
+#define DOT11_RSN_CONFIG_VERSION          0x51020100   /* RSN version (W) */
+#define LOCAL_RSN_CONFIG_ALL              0x5F010100   /* RSN CONFIG ALL (W) */
+#define DOT11_PMK_TSC                     0x55010100   /* PMK_TSC (W) */
+#define DOT11_GMK1_TSC                    0x55010101   /* GMK1_TSC (W) */
+#define DOT11_GMK2_TSC                    0x55010102   /* GMK2_TSC (W) */
+#define DOT11_GMK3_TSC                   0x55010103    /* GMK3_TSC */
+#define LOCAL_PMK                         0x58010100   /* Pairwise Master Key cache (W) */
+
+#define LOCAL_REGION                      0xF10A0100   /* Region setting */
+
+#ifdef WPS
+#define LOCAL_WPS_ENABLE                  0xF10B0100   /* WiFi Protected Setup */
+#define LOCAL_WPS_PROBE_REQ               0xF10C0100   /* WPS Probe Request */
+#endif /* WPS */
+
+#define LOCAL_GAIN                        0xF10D0100   /* Carrer sense threshold for demo ato show */
+#define LOCAL_EEPROM_SUM                  0xF10E0100   /* EEPROM checksum information */
+
+/* HIF_MIB_GET_REQ: read one MIB attribute (DOT11_* / LOCAL_* id above). */
+struct hostif_mib_get_request_t {
+       struct hostif_hdr header;
+       uint32_t mib_attribute;
+} __attribute__ ((packed));
+
+/* Typed, variable-length MIB value; body[] holds "size" bytes of "type" data. */
+struct hostif_mib_value_t {
+       uint16_t size;
+       uint16_t type;
+#define MIB_VALUE_TYPE_NULL     0
+#define MIB_VALUE_TYPE_INT      1
+#define MIB_VALUE_TYPE_BOOL     2
+#define MIB_VALUE_TYPE_COUNT32  3
+#define MIB_VALUE_TYPE_OSTRING  4
+       uint8_t body[0];
+} __attribute__ ((packed));
+
+/* HIF_MIB_GET_CONF: result of a MIB read. */
+struct hostif_mib_get_confirm_t {
+       struct hostif_hdr header;
+       uint32_t mib_status;
+#define MIB_SUCCESS    0
+#define MIB_INVALID    1
+#define MIB_READ_ONLY  2
+#define MIB_WRITE_ONLY 3
+       uint32_t mib_attribute;
+       struct hostif_mib_value_t mib_value;
+} __attribute__ ((packed));
+
+/* HIF_MIB_SET_REQ: write one MIB attribute. */
+struct hostif_mib_set_request_t {
+       struct hostif_hdr header;
+       uint32_t mib_attribute;
+       struct hostif_mib_value_t mib_value;
+} __attribute__ ((packed));
+
+/* HIF_MIB_SET_CONF: result of a MIB write (mib_status as for get). */
+struct hostif_mib_set_confirm_t {
+       struct hostif_hdr header;
+       uint32_t mib_status;
+       uint32_t mib_attribute;
+} __attribute__ ((packed));
+
+/* HIF_POWERMGT_REQ: select active vs. power-save operation. */
+struct hostif_power_mngmt_request_t {
+       struct hostif_hdr header;
+       uint32_t mode;
+#define POWER_ACTIVE  1
+#define POWER_SAVE    2
+       uint32_t wake_up;
+#define SLEEP_FALSE 0
+#define SLEEP_TRUE  1  /* not used */
+       uint32_t receiveDTIMs;
+#define DTIM_FALSE 0
+#define DTIM_TRUE  1
+} __attribute__ ((packed));
+
+/* power management mode */
+enum {
+       POWMGT_ACTIVE_MODE = 0,
+       POWMGT_SAVE1_MODE,
+       POWMGT_SAVE2_MODE
+};
+
+#define        RESULT_SUCCESS            0
+#define        RESULT_INVALID_PARAMETERS 1
+#define        RESULT_NOT_SUPPORTED      2
+/* #define     RESULT_ALREADY_RUNNING    3 */
+#define        RESULT_ALREADY_RUNNING    7
+
+struct hostif_power_mngmt_confirm_t {
+       struct hostif_hdr header;
+       uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_start_request_t {
+       struct hostif_hdr header;
+       uint16_t mode;
+#define MODE_PSEUDO_ADHOC   0
+#define MODE_INFRASTRUCTURE 1
+#define MODE_AP             2  /* not used */
+#define MODE_ADHOC          3
+} __attribute__ ((packed));
+
+struct hostif_start_confirm_t {
+       struct hostif_hdr header;
+       uint16_t result_code;
+} __attribute__ ((packed));
+
+/* SSID container: size counts the valid bytes in body[]. */
+#define SSID_MAX_SIZE 32
+struct ssid_t {
+       uint8_t size;
+       uint8_t body[SSID_MAX_SIZE];
+       uint8_t ssid_pad;
+} __attribute__ ((packed));
+
+/* 8-entry rate set (size = number of valid rates in body[]). */
+#define RATE_SET_MAX_SIZE 16
+struct rate_set8_t {
+       uint8_t size;
+       uint8_t body[8];
+       uint8_t rate_pad;
+} __attribute__ ((packed));
+
+/* FH Parameter Set element fields. */
+struct FhParms_t {
+       uint16_t dwellTime;
+       uint8_t hopSet;
+       uint8_t hopPattern;
+       uint8_t hopIndex;
+} __attribute__ ((packed));
+
+/* DS Parameter Set element (current channel). */
+struct DsParms_t {
+       uint8_t channel;
+} __attribute__ ((packed));
+
+/* CF Parameter Set element fields. */
+struct CfParms_t {
+       uint8_t count;
+       uint8_t period;
+       uint16_t maxDuration;
+       uint16_t durRemaining;
+} __attribute__ ((packed));
+
+/* IBSS Parameter Set element (ATIM window). */
+struct IbssParms_t {
+       uint16_t atimWindow;
+} __attribute__ ((packed));
+
+/* Raw RSN information element bytes (size = valid length of body[]). */
+struct rsn_t {
+       uint8_t size;
+#define RSN_BODY_SIZE 64
+       uint8_t body[RSN_BODY_SIZE];
+} __attribute__ ((packed));
+
+/* ERP Information element. */
+struct ErpParams_t {
+       uint8_t erp_info;
+} __attribute__ ((packed));
+
+/* 16-entry rate set (size = number of valid rates in body[]). */
+struct rate_set16_t {
+       uint8_t size;
+       uint8_t body[16];
+       uint8_t rate_pad;
+} __attribute__ ((packed));
+
+/* Per-AP record; the trailing comments give the fixed byte offset of each
+ * field inside the packed layout (total 1032 bytes). The BSS_CAP_* bit
+ * names match the IEEE 802.11 capability-information field. */
+struct ap_info_t {
+       uint8_t bssid[6];       /* +00 */
+       uint8_t rssi;   /* +06 */
+       uint8_t sq;     /* +07 */
+       uint8_t noise;  /* +08 */
+       uint8_t pad0;   /* +09 */
+       uint16_t beacon_period; /* +10 */
+       uint16_t capability;    /* +12 */
+#define BSS_CAP_ESS             (1<<0)
+#define BSS_CAP_IBSS            (1<<1)
+#define BSS_CAP_CF_POLABLE      (1<<2)
+#define BSS_CAP_CF_POLL_REQ     (1<<3)
+#define BSS_CAP_PRIVACY         (1<<4)
+#define BSS_CAP_SHORT_PREAMBLE  (1<<5)
+#define BSS_CAP_PBCC            (1<<6)
+#define BSS_CAP_CHANNEL_AGILITY (1<<7)
+#define BSS_CAP_SHORT_SLOT_TIME (1<<10)
+#define BSS_CAP_DSSS_OFDM       (1<<13)
+       uint8_t frame_type;     /* +14: FRAME_TYPE_BEACON or FRAME_TYPE_PROBE_RESP */
+       uint8_t ch_info;        /* +15 */
+#define FRAME_TYPE_BEACON      0x80
+#define FRAME_TYPE_PROBE_RESP  0x50
+       uint16_t body_size;     /* +16: valid bytes in body[] */
+       uint8_t body[1024];     /* +18 */
+       /* +1032 */
+} __attribute__ ((packed));
+
+/* AP description carried inside hostif_connect_indication_t; trailing
+ * comments give fixed byte offsets within the packed layout. */
+struct link_ap_info_t {
+       uint8_t bssid[6];       /* +00 */
+       uint8_t rssi;   /* +06 */
+       uint8_t sq;     /* +07 */
+       uint8_t noise;  /* +08 */
+       uint8_t pad0;   /* +09 */
+       uint16_t beacon_period; /* +10 */
+       uint16_t capability;    /* +12 */
+       struct rate_set8_t rate_set;    /* +14 */
+       struct FhParms_t fh_parameter;  /* +24 */
+       struct DsParms_t ds_parameter;  /* +29 */
+       struct CfParms_t cf_parameter;  /* +30 */
+       struct IbssParms_t ibss_parameter;      /* +36 */
+       struct ErpParams_t erp_parameter;       /* +38 */
+       uint8_t pad1;   /* +39 */
+       struct rate_set8_t ext_rate_set;        /* +40 */
+       uint8_t DTIM_period;    /* +50 */
+       uint8_t rsn_mode;       /* +51: RSN_MODE_NONE/WPA/WPA2 */
+#define RSN_MODE_NONE  0
+#define RSN_MODE_WPA   1
+#define RSN_MODE_WPA2  2
+       struct {
+               uint8_t size;   /* +52 */
+               uint8_t body[128];      /* +53 */
+       } __attribute__ ((packed)) rsn;
+} __attribute__ ((packed));
+
+struct hostif_connect_indication_t {
+       struct hostif_hdr header;
+       uint16_t connect_code;
+#define RESULT_CONNECT    0
+#define RESULT_DISCONNECT 1
+       struct link_ap_info_t link_ap_info;
+} __attribute__ ((packed));
+
+struct hostif_stop_request_t {
+       struct hostif_hdr header;
+} __attribute__ ((packed));
+
+struct hostif_stop_confirm_t {
+       struct hostif_hdr header;
+       uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_ps_adhoc_set_request_t {
+       struct hostif_hdr header;
+       uint16_t phy_type;
+#define D_11B_ONLY_MODE                0
+#define D_11G_ONLY_MODE                1
+#define D_11BG_COMPATIBLE_MODE 2
+#define D_11A_ONLY_MODE                3
+       uint16_t cts_mode;
+#define CTS_MODE_FALSE 0
+#define CTS_MODE_TRUE  1
+       uint16_t channel;
+       struct rate_set16_t rate_set;
+       uint16_t capability;    /* bit5:preamble bit6:pbcc pbcc not supported always 0 
+                                * bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
+       uint16_t scan_type;
+} __attribute__ ((packed));
+
+struct hostif_ps_adhoc_set_confirm_t {
+       struct hostif_hdr header;
+       uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_infrastructure_set_request_t {
+       struct hostif_hdr header;
+       uint16_t phy_type;
+       uint16_t cts_mode;
+       struct rate_set16_t rate_set;
+       struct ssid_t ssid;
+       uint16_t capability;    /* bit5:preamble bit6:pbcc pbcc not supported always 0 
+                                * bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
+       uint16_t beacon_lost_count;
+       uint16_t auth_type;
+#define AUTH_TYPE_OPEN_SYSTEM 0
+#define AUTH_TYPE_SHARED_KEY  1
+       struct channel_list_t channel_list;
+       uint16_t scan_type;
+} __attribute__ ((packed));
+
+struct hostif_infrastructure_set2_request_t {
+       struct hostif_hdr header;
+       uint16_t phy_type;
+       uint16_t cts_mode;
+       struct rate_set16_t rate_set;
+       struct ssid_t ssid;
+       uint16_t capability;    /* bit5:preamble bit6:pbcc pbcc not supported always 0 
+                                * bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
+       uint16_t beacon_lost_count;
+       uint16_t auth_type;
+#define AUTH_TYPE_OPEN_SYSTEM 0
+#define AUTH_TYPE_SHARED_KEY  1
+       struct channel_list_t channel_list;
+       uint16_t scan_type;
+       uint8_t bssid[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct hostif_infrastructure_set_confirm_t {
+       struct hostif_hdr header;
+       uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_adhoc_set_request_t {
+       struct hostif_hdr header;
+       uint16_t phy_type;
+       uint16_t cts_mode;
+       uint16_t channel;
+       struct rate_set16_t rate_set;
+       struct ssid_t ssid;
+       uint16_t capability;    /* bit5:preamble bit6:pbcc pbcc not supported always 0 
+                                * bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
+       uint16_t scan_type;
+} __attribute__ ((packed));
+
+struct hostif_adhoc_set2_request_t {
+       struct hostif_hdr header;
+       uint16_t phy_type;
+       uint16_t cts_mode;
+       uint16_t reserved;
+       struct rate_set16_t rate_set;
+       struct ssid_t ssid;
+       uint16_t capability;    /* bit5:preamble bit6:pbcc pbcc not supported always 0 
+                                * bit10:ShortSlotTime bit13:DSSS-OFDM DSSS-OFDM not supported always 0 */
+       uint16_t scan_type;
+       struct channel_list_t channel_list;
+       uint8_t bssid[ETH_ALEN];
+} __attribute__ ((packed));
+
+struct hostif_adhoc_set_confirm_t {
+       struct hostif_hdr header;
+       uint16_t result_code;
+} __attribute__ ((packed));
+
+struct last_associate_t {
+       uint8_t type;
+       uint8_t status;
+} __attribute__ ((packed));
+
+struct association_request_t {
+       uint8_t type;
+#define FRAME_TYPE_ASSOC_REQ   0x00
+#define FRAME_TYPE_REASSOC_REQ 0x20
+       uint8_t pad;
+       uint16_t capability;
+       uint16_t listen_interval;
+       uint8_t ap_address[6];
+       uint16_t reqIEs_size;
+} __attribute__ ((packed));
+
+struct association_response_t {
+       uint8_t type;
+#define FRAME_TYPE_ASSOC_RESP  0x10
+#define FRAME_TYPE_REASSOC_RESP        0x30
+       uint8_t pad;
+       uint16_t capability;
+       uint16_t status;
+       uint16_t association_id;
+       uint16_t respIEs_size;
+} __attribute__ ((packed));
+
+struct hostif_associate_indication_t {
+       struct hostif_hdr header;
+       struct association_request_t assoc_req;
+       struct association_response_t assoc_resp;
+       /* followed by (reqIEs_size + respIEs_size) octets of data */
+       /* reqIEs data *//* respIEs data */
+} __attribute__ ((packed));
+
+struct hostif_bss_scan_request_t {
+       struct hostif_hdr header;
+       uint8_t scan_type;
+#define ACTIVE_SCAN  0
+#define PASSIVE_SCAN 1
+       uint8_t pad[3];
+       uint32_t ch_time_min;
+       uint32_t ch_time_max;
+       struct channel_list_t channel_list;
+       struct ssid_t ssid;
+} __attribute__ ((packed));
+
+struct hostif_bss_scan_confirm_t {
+       struct hostif_hdr header;
+       uint16_t result_code;
+       uint16_t reserved;
+} __attribute__ ((packed));
+
+struct hostif_phy_information_request_t {
+       struct hostif_hdr header;
+       uint16_t type;
+#define NORMAL_TYPE    0
+#define TIME_TYPE      1
+       uint16_t time;  /* unit 100ms */
+} __attribute__ ((packed));
+
+struct hostif_phy_information_confirm_t {
+       struct hostif_hdr header;
+       uint8_t rssi;
+       uint8_t sq;
+       uint8_t noise;
+       uint8_t link_speed;
+       uint32_t tx_frame;
+       uint32_t rx_frame;
+       uint32_t tx_error;
+       uint32_t rx_error;
+} __attribute__ ((packed));
+
+/* sleep mode */
+#define SLP_ACTIVE  0
+#define SLP_SLEEP   1
+struct hostif_sleep_request_t {
+       struct hostif_hdr header;
+} __attribute__ ((packed));
+
+struct hostif_sleep_confirm_t {
+       struct hostif_hdr header;
+       uint16_t result_code;
+} __attribute__ ((packed));
+
+struct hostif_mic_failure_request_t {
+       struct hostif_hdr header;
+       uint16_t failure_count;
+       uint16_t timer;
+} __attribute__ ((packed));
+
+struct hostif_mic_failure_confirm_t {
+       struct hostif_hdr header;
+       uint16_t result_code;
+} __attribute__ ((packed));
+
+#define BASIC_RATE     0x80
+#define RATE_MASK      0x7F
+
+#define TX_RATE_AUTO      0xff
+#define TX_RATE_1M_FIXED  0
+#define TX_RATE_2M_FIXED  1
+#define TX_RATE_1_2M_AUTO 2
+#define TX_RATE_5M_FIXED  3
+#define TX_RATE_11M_FIXED 4
+
+#define TX_RATE_FULL_AUTO      0
+#define TX_RATE_11_AUTO                1
+#define TX_RATE_11B_AUTO       2
+#define TX_RATE_11BG_AUTO      3
+#define TX_RATE_MANUAL_AUTO    4
+#define TX_RATE_FIXED          5
+
+/* 11b rate */
+#define TX_RATE_1M     (uint8_t)(10/5) /* 11b 11g basic rate */
+#define TX_RATE_2M     (uint8_t)(20/5) /* 11b 11g basic rate */
+#define TX_RATE_5M     (uint8_t)(55/5) /* 11g basic rate */
+#define TX_RATE_11M    (uint8_t)(110/5)        /* 11g basic rate */
+
+/* 11g rate */
+#define TX_RATE_6M     (uint8_t)(60/5) /* 11g basic rate */
+#define TX_RATE_12M    (uint8_t)(120/5)        /* 11g basic rate */
+#define TX_RATE_24M    (uint8_t)(240/5)        /* 11g basic rate */
+#define TX_RATE_9M     (uint8_t)(90/5)
+#define TX_RATE_18M    (uint8_t)(180/5)
+#define TX_RATE_36M    (uint8_t)(360/5)
+#define TX_RATE_48M    (uint8_t)(480/5)
+#define TX_RATE_54M    (uint8_t)(540/5)
+
+#define IS_11B_RATE(A) (((A&RATE_MASK)==TX_RATE_1M)||((A&RATE_MASK)==TX_RATE_2M)||\
+                        ((A&RATE_MASK)==TX_RATE_5M)||((A&RATE_MASK)==TX_RATE_11M))
+
+#define IS_OFDM_RATE(A) (((A&RATE_MASK)==TX_RATE_6M)||((A&RATE_MASK)==TX_RATE_12M)||\
+                        ((A&RATE_MASK)==TX_RATE_24M)||((A&RATE_MASK)==TX_RATE_9M)||\
+                        ((A&RATE_MASK)==TX_RATE_18M)||((A&RATE_MASK)==TX_RATE_36M)||\
+                        ((A&RATE_MASK)==TX_RATE_48M)||((A&RATE_MASK)==TX_RATE_54M))
+
+#define IS_11BG_RATE(A) (IS_11B_RATE(A)||IS_OFDM_RATE(A))
+
+#define IS_OFDM_EXT_RATE(A)  (((A&RATE_MASK)==TX_RATE_9M)||((A&RATE_MASK)==TX_RATE_18M)||\
+                             ((A&RATE_MASK)==TX_RATE_36M)||((A&RATE_MASK)==TX_RATE_48M)||\
+                             ((A&RATE_MASK)==TX_RATE_54M))
+
+enum {
+       CONNECT_STATUS = 0,
+       DISCONNECT_STATUS
+};
+
+/* preamble type */
+enum {
+       LONG_PREAMBLE = 0,
+       SHORT_PREAMBLE
+};
+
+/* multicast filter */
+#define MCAST_FILTER_MCAST    0
+#define MCAST_FILTER_MCASTALL 1
+#define MCAST_FILTER_PROMISC  2
+
+#define NIC_MAX_MCAST_LIST 32
+
+/* macro function */
+#define HIF_EVENT_MASK 0xE800
+#define IS_HIF_IND(_EVENT)  ((_EVENT&HIF_EVENT_MASK)==0xE800  && \
+                             ((_EVENT&~HIF_EVENT_MASK)==0x0001 || \
+                              (_EVENT&~HIF_EVENT_MASK)==0x0006 || \
+                              (_EVENT&~HIF_EVENT_MASK)==0x000C || \
+                              (_EVENT&~HIF_EVENT_MASK)==0x0011 || \
+                              (_EVENT&~HIF_EVENT_MASK)==0x0012))
+
+#define IS_HIF_CONF(_EVENT) ((_EVENT&HIF_EVENT_MASK)==0xE800  && \
+                             (_EVENT&~HIF_EVENT_MASK)>0x0000  && \
+                             (_EVENT&~HIF_EVENT_MASK)<0x0012  && \
+                             !IS_HIF_IND(_EVENT) )
+
+#ifdef __KERNEL__
+
+#include "ks_wlan.h"
+
+/* function prototype */
+extern int hostif_data_request(struct ks_wlan_private *priv,
+                              struct sk_buff *packet);
+extern void hostif_receive(struct ks_wlan_private *priv, unsigned char *p,
+                          unsigned int size);
+extern void hostif_sme_enqueue(struct ks_wlan_private *priv, uint16_t event);
+extern int hostif_init(struct ks_wlan_private *priv);
+extern void hostif_exit(struct ks_wlan_private *priv);
+
+/* Round a transfer size up to the next DEVICE_ALIGNMENT boundary.
+ * KS_ATOM builds additionally enforce a 1024-byte minimum first; when
+ * DEVICE_ALIGNMENT is not defined the size is returned unchanged. */
+static
+inline int hif_align_size(int size)
+{
+#ifdef KS_ATOM
+       if (size < 1024)
+               size = 1024;
+#endif
+#ifdef DEVICE_ALIGNMENT
+       return (size % DEVICE_ALIGNMENT) ? size + DEVICE_ALIGNMENT -
+           (size % DEVICE_ALIGNMENT) : size;
+#else
+       return size;
+#endif
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* _KS_HOSTIF_H_ */
diff --git a/drivers/staging/ks7010/ks_wlan.h b/drivers/staging/ks7010/ks_wlan.h
new file mode 100644 (file)
index 0000000..f05dc01
--- /dev/null
@@ -0,0 +1,505 @@
+/*
+ *   Driver for KeyStream IEEE802.11 b/g wireless LAN cards.
+ *
+ *   Copyright (C) 2006-2008 KeyStream Corp.
+ *   Copyright (C) 2009 Renesas Technology Corp.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License version 2 as
+ *   published by the Free Software Foundation.
+ */
+
+#ifndef _KS_WLAN_H
+#define _KS_WLAN_H
+
+#define WPS
+
+#include <linux/version.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/spinlock.h>    /* spinlock_t                                   */
+#include <linux/sched.h>       /* wait_queue_head_t                            */
+#include <linux/types.h>       /* pid_t                                        */
+#include <linux/netdevice.h>   /* struct net_device_stats,  struct sk_buff     */
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <asm/atomic.h>        /* struct atmic_t                               */
+#include <linux/timer.h>       /* struct timer_list */
+#include <linux/string.h>
+#include <linux/completion.h>  /* struct completion */
+#include <linux/workqueue.h>
+
+#include <asm/io.h>
+
+#include "ks7010_sdio.h"
+
+#ifdef KS_WLAN_DEBUG
+#define DPRINTK(n, fmt, args...) \
+                 if (KS_WLAN_DEBUG>(n)) printk(KERN_NOTICE "%s: "fmt, __FUNCTION__, ## args)
+#else
+#define DPRINTK(n, fmt, args...)
+#endif
+
+/* User-configurable wireless parameters kept per device instance. */
+struct ks_wlan_parameter {
+       uint8_t operation_mode; /* Operation Mode */
+       uint8_t channel;        /*  Channel */
+       uint8_t tx_rate;        /*  Transmit Rate */
+       struct {
+               uint8_t size;
+               uint8_t body[16];
+       } rate_set;
+       uint8_t bssid[ETH_ALEN];        /* BSSID */
+       struct {
+               uint8_t size;
+               uint8_t body[32 + 1];
+       } ssid; /*  SSID */
+       uint8_t preamble;       /*  Preamble */
+       uint8_t powermgt;       /*  PowerManagementMode */
+       uint32_t scan_type;     /*  AP List Scan Type */
+#define BEACON_LOST_COUNT_MIN 0
+#define BEACON_LOST_COUNT_MAX 65535
+       uint32_t beacon_lost_count;     /*  Beacon Lost Count */
+       uint32_t rts;   /*  RTS Threshold */
+       uint32_t fragment;      /*  Fragmentation Threshold */
+       uint32_t privacy_invoked;       /* WEP on/off */
+       uint32_t wep_index;     /* active WEP key index */
+       struct {
+               uint8_t size;
+               uint8_t val[13 * 2 + 1];
+       } wep_key[4];
+       uint16_t authenticate_type;
+       uint16_t phy_type;      /* 11b/11g/11bg mode type */
+       uint16_t cts_mode;      /* for 11g/11bg mode cts mode */
+       uint16_t phy_info_timer;        /* phy information timer */
+};
+
+enum {
+       DEVICE_STATE_OFF = 0,   /* this means hw_unavailable is != 0 */
+       DEVICE_STATE_PREBOOT,   /* we are in a pre-boot state (empty RAM) */
+       DEVICE_STATE_BOOT,      /* boot state (fw upload, run fw) */
+       DEVICE_STATE_PREINIT,   /* pre-init state */
+       DEVICE_STATE_INIT,      /* init state (restore MIB backup to device) */
+       DEVICE_STATE_READY,     /* driver&device are in operational state */
+       DEVICE_STATE_SLEEP      /* device in sleep mode */
+};
+
+/* SME flag */
+#define SME_MODE_SET       (1<<0)
+#define SME_RTS             (1<<1)
+#define SME_FRAG            (1<<2)
+#define SME_WEP_FLAG        (1<<3)
+#define SME_WEP_INDEX       (1<<4)
+#define SME_WEP_VAL1        (1<<5)
+#define SME_WEP_VAL2        (1<<6)
+#define SME_WEP_VAL3        (1<<7)
+#define SME_WEP_VAL4        (1<<8)
+#define SME_WEP_VAL_MASK    (SME_WEP_VAL1|SME_WEP_VAL2|SME_WEP_VAL3|SME_WEP_VAL4)
+#define SME_RSN             (1<<9)
+#define SME_RSN_MULTICAST   (1<<10)
+#define SME_RSN_UNICAST            (1<<11)
+#define SME_RSN_AUTH       (1<<12)
+
+#define SME_AP_SCAN         (1<<13)
+#define SME_MULTICAST       (1<<14)
+
+/* SME Event */
+enum {
+       SME_START,
+
+       SME_MULTICAST_REQUEST,
+       SME_MACADDRESS_SET_REQUEST,
+       SME_BSS_SCAN_REQUEST,
+       SME_SET_FLAG,
+       SME_SET_TXKEY,
+       SME_SET_KEY1,
+       SME_SET_KEY2,
+       SME_SET_KEY3,
+       SME_SET_KEY4,
+       SME_SET_PMK_TSC,
+       SME_SET_GMK1_TSC,
+       SME_SET_GMK2_TSC,
+       SME_SET_GMK3_TSC,
+       SME_SET_PMKSA,
+       SME_POW_MNGMT_REQUEST,
+       SME_PHY_INFO_REQUEST,
+       SME_MIC_FAILURE_REQUEST,
+       SME_GET_MAC_ADDRESS,
+       SME_GET_PRODUCT_VERSION,
+       SME_STOP_REQUEST,
+       SME_RTS_THRESHOLD_REQUEST,
+       SME_FRAGMENTATION_THRESHOLD_REQUEST,
+       SME_WEP_INDEX_REQUEST,
+       SME_WEP_KEY1_REQUEST,
+       SME_WEP_KEY2_REQUEST,
+       SME_WEP_KEY3_REQUEST,
+       SME_WEP_KEY4_REQUEST,
+       SME_WEP_FLAG_REQUEST,
+       SME_RSN_UCAST_REQUEST,
+       SME_RSN_MCAST_REQUEST,
+       SME_RSN_AUTH_REQUEST,
+       SME_RSN_ENABLED_REQUEST,
+       SME_RSN_MODE_REQUEST,
+#ifdef WPS
+       SME_WPS_ENABLE_REQUEST,
+       SME_WPS_PROBE_REQUEST,
+#endif
+       SME_SET_GAIN,
+       SME_GET_GAIN,
+       SME_SLEEP_REQUEST,
+       SME_SET_REGION,
+       SME_MODE_SET_REQUEST,
+       SME_START_REQUEST,
+       SME_GET_EEPROM_CKSUM,
+
+       SME_MIC_FAILURE_CONFIRM,
+       SME_START_CONFIRM,
+
+       SME_MULTICAST_CONFIRM,
+       SME_BSS_SCAN_CONFIRM,
+       SME_GET_CURRENT_AP,
+       SME_POW_MNGMT_CONFIRM,
+       SME_PHY_INFO_CONFIRM,
+       SME_STOP_CONFIRM,
+       SME_RTS_THRESHOLD_CONFIRM,
+       SME_FRAGMENTATION_THRESHOLD_CONFIRM,
+       SME_WEP_INDEX_CONFIRM,
+       SME_WEP_KEY1_CONFIRM,
+       SME_WEP_KEY2_CONFIRM,
+       SME_WEP_KEY3_CONFIRM,
+       SME_WEP_KEY4_CONFIRM,
+       SME_WEP_FLAG_CONFIRM,
+       SME_RSN_UCAST_CONFIRM,
+       SME_RSN_MCAST_CONFIRM,
+       SME_RSN_AUTH_CONFIRM,
+       SME_RSN_ENABLED_CONFIRM,
+       SME_RSN_MODE_CONFIRM,
+       SME_MODE_SET_CONFIRM,
+       SME_SLEEP_CONFIRM,
+
+       SME_RSN_SET_CONFIRM,
+       SME_WEP_SET_CONFIRM,
+       SME_TERMINATE,
+
+       SME_EVENT_SIZE  /* end */
+};
+
+/* SME Status */
+enum {
+       SME_IDLE,
+       SME_SETUP,
+       SME_DISCONNECT,
+       SME_CONNECT
+};
+
+#define        SME_EVENT_BUFF_SIZE     128
+
+/* SME state machine bookkeeping: a queue of pending SME_* events in
+ * event_buff[], indexed by qhead/qtail (both reset to 0 in hostif_init) —
+ * presumably a ring buffer serviced by hostif_sme_task; verify in ks_hostif.c. */
+struct sme_info {
+       int sme_status; /* SME_IDLE/SETUP/DISCONNECT/CONNECT */
+       int event_buff[SME_EVENT_BUFF_SIZE];
+       unsigned int qhead;
+       unsigned int qtail;
+#ifdef KS_WLAN_DEBUG
+       /* for debug */
+       unsigned int max_event_count;
+#endif
+       spinlock_t sme_spin;    /* protects the queue indices */
+       unsigned long sme_flag; /* SME_* flag bits defined above */
+};
+
+/* Same head/tail queue shape as sme_info's event queue; no user visible
+ * in this file — NOTE(review): confirm it is actually referenced. */
+struct hostt_t {
+       int buff[SME_EVENT_BUFF_SIZE];
+       unsigned int qhead;
+       unsigned int qtail;
+};
+
+#define RSN_IE_BODY_MAX 64
+struct rsn_ie_t {
+       uint8_t id;     /* 0xdd = WPA or 0x30 = RSN */
+       uint8_t size;   /* max ? 255 ? */
+       uint8_t body[RSN_IE_BODY_MAX];
+} __attribute__ ((packed));
+
+#ifdef WPS
+#define WPS_IE_BODY_MAX 255
+struct wps_ie_t {
+       uint8_t id;     /* 221 'dd <len> 00 50 F2 04' */
+       uint8_t size;   /* max ? 255 ? */
+       uint8_t body[WPS_IE_BODY_MAX];
+} __attribute__ ((packed));
+#endif /* WPS */
+
+/* Driver-side record of one scanned AP (SSID, rates, signal, IEs). */
+struct local_ap_t {
+       uint8_t bssid[6];
+       uint8_t rssi;
+       uint8_t sq;
+       struct {
+               uint8_t size;
+               uint8_t body[32];
+               uint8_t ssid_pad;
+       } ssid;
+       struct {
+               uint8_t size;
+               uint8_t body[16];
+               uint8_t rate_pad;
+       } rate_set;
+       uint16_t capability;
+       uint8_t channel;
+       uint8_t noise;
+       struct rsn_ie_t wpa_ie;
+       struct rsn_ie_t rsn_ie;
+#ifdef WPS
+       struct wps_ie_t wps_ie;
+#endif /* WPS */
+};
+
+/* AP list built from scans; the extra slot at index LOCAL_CURRENT_AP
+ * (== LOCAL_APLIST_MAX) is reserved for the current AP. */
+#define LOCAL_APLIST_MAX 31
+#define LOCAL_CURRENT_AP LOCAL_APLIST_MAX
+struct local_aplist_t {
+       int size;
+       struct local_ap_t ap[LOCAL_APLIST_MAX + 1];
+};
+
+struct local_gain_t {
+       uint8_t TxMode;
+       uint8_t RxMode;
+       uint8_t TxGain;
+       uint8_t RxGain;
+};
+
+struct local_eeprom_sum_t {
+       uint8_t type;
+       uint8_t result;
+};
+
+enum {
+       EEPROM_OK,
+       EEPROM_CHECKSUM_NONE,
+       EEPROM_FW_NOT_SUPPORT,
+       EEPROM_NG,
+};
+
+/* Power Save Status */
+enum {
+       PS_NONE,
+       PS_ACTIVE_SET,
+       PS_SAVE_SET,
+       PS_CONF_WAIT,
+       PS_SNOOZE,
+       PS_WAKEUP
+};
+
+struct power_save_status_t {
+       atomic_t status;        /* initialvalue 0 */
+       struct completion wakeup_wait;
+       atomic_t confirm_wait;
+       atomic_t snooze_guard;
+};
+
+struct sleep_status_t {
+       atomic_t status;        /* initialvalue 0 */
+       atomic_t doze_request;
+       atomic_t wakeup_request;
+};
+
+/* WPA */
+struct scan_ext_t {
+       unsigned int flag;
+       char ssid[IW_ESSID_MAX_SIZE + 1];
+};
+
+/* Cipher suite selectors used for pairwise/group cipher configuration */
+enum {
+       CIPHER_NONE,
+       CIPHER_WEP40,
+       CIPHER_TKIP,
+       CIPHER_CCMP,
+       CIPHER_WEP104
+};
+
+/* WPA cipher suite identifiers (OUI 00:50:f2) as carried in the WPA IE */
+#define CIPHER_ID_WPA_NONE    "\x00\x50\xf2\x00"
+#define CIPHER_ID_WPA_WEP40   "\x00\x50\xf2\x01"
+#define CIPHER_ID_WPA_TKIP    "\x00\x50\xf2\x02"
+#define CIPHER_ID_WPA_CCMP    "\x00\x50\xf2\x04"
+#define CIPHER_ID_WPA_WEP104  "\x00\x50\xf2\x05"
+
+/* WPA2/RSN cipher suite identifiers (OUI 00:0f:ac) as carried in the RSN IE */
+#define CIPHER_ID_WPA2_NONE   "\x00\x0f\xac\x00"
+#define CIPHER_ID_WPA2_WEP40  "\x00\x0f\xac\x01"
+#define CIPHER_ID_WPA2_TKIP   "\x00\x0f\xac\x02"
+#define CIPHER_ID_WPA2_CCMP   "\x00\x0f\xac\x04"
+#define CIPHER_ID_WPA2_WEP104 "\x00\x0f\xac\x05"
+
+#define CIPHER_ID_LEN    4     /* OUI (3 bytes) + suite type (1 byte) */
+
+/* Authentication and key management (AKM) suite selectors */
+enum {
+       KEY_MGMT_802_1X,
+       KEY_MGMT_PSK,
+       KEY_MGMT_WPANONE,
+};
+
+/* WPA AKM suite identifiers (OUI 00:50:f2) */
+#define KEY_MGMT_ID_WPA_NONE     "\x00\x50\xf2\x00"
+#define KEY_MGMT_ID_WPA_1X       "\x00\x50\xf2\x01"
+#define KEY_MGMT_ID_WPA_PSK      "\x00\x50\xf2\x02"
+#define KEY_MGMT_ID_WPA_WPANONE  "\x00\x50\xf2\xff"
+
+/* WPA2/RSN AKM suite identifiers (OUI 00:0f:ac) */
+#define KEY_MGMT_ID_WPA2_NONE    "\x00\x0f\xac\x00"
+#define KEY_MGMT_ID_WPA2_1X      "\x00\x0f\xac\x01"
+#define KEY_MGMT_ID_WPA2_PSK     "\x00\x0f\xac\x02"
+#define KEY_MGMT_ID_WPA2_WPANONE "\x00\x0f\xac\xff"
+
+#define KEY_MGMT_ID_LEN  4     /* OUI (3 bytes) + suite type (1 byte) */
+
+#define MIC_KEY_SIZE 8         /* TKIP Michael MIC key, 8 bytes each direction */
+
+/* One configured encryption key (WEP/TKIP/CCMP) plus its TKIP MIC keys */
+struct wpa_key_t {
+       uint32_t ext_flags;     /* IW_ENCODE_EXT_xxx */
+       uint8_t tx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+       uint8_t rx_seq[IW_ENCODE_SEQ_MAX_SIZE]; /* LSB first */
+       struct sockaddr addr;   /* ff:ff:ff:ff:ff:ff for broadcast/multicast
+                                * (group) keys or unicast address for
+                                * individual keys */
+       uint16_t alg;
+       uint16_t key_len;       /* WEP: 5 or 13, TKIP: 32, CCMP: 16 */
+       uint8_t key_val[IW_ENCODING_TOKEN_MAX];
+       uint8_t tx_mic_key[MIC_KEY_SIZE];
+       uint8_t rx_mic_key[MIC_KEY_SIZE];
+};
+#define WPA_KEY_INDEX_MAX 4    /* key slots 0..3 */
+#define WPA_RX_SEQ_LEN 6
+
+/* TKIP Michael MIC failure countermeasure state (802.11i 60s window) */
+struct mic_failure_t {
+       uint16_t failure;       /* MIC Failure counter 0 or 1 or 2 */
+       uint16_t counter;       /* 1sec counter 0-60 */
+       uint32_t last_failure_time;
+       int stop;       /* stop flag */
+};
+
+/* Current WPA/RSN configuration negotiated via wireless extensions */
+struct wpa_status_t {
+       int wpa_enabled;
+       unsigned int rsn_enabled;
+       int version;            /* WPA version (IW_AUTH_WPA_VERSION_*) */
+       int pairwise_suite;     /* unicast cipher */
+       int group_suite;        /* multicast cipher */
+       int key_mgmt_suite;     /* authentication key management suite */
+       int auth_alg;
+       int txkey;              /* index of the current transmit key */
+       struct wpa_key_t key[WPA_KEY_INDEX_MAX];
+       struct scan_ext_t scan_ext;
+       struct mic_failure_t mic_failure;
+};
+
+#include <linux/list.h>
+#define PMK_LIST_MAX 8
+/* PMKSA cache: fixed pool of BSSID/PMKID pairs linked through @head */
+struct pmk_list_t {
+       uint16_t size;          /* number of entries currently on the list */
+       struct list_head head;
+       struct pmk_t {
+               struct list_head list;
+               uint8_t bssid[ETH_ALEN];
+               uint8_t pmkid[IW_PMKID_LEN];
+       } pmk[PMK_LIST_MAX];
+};
+
+#ifdef WPS
+/* Wi-Fi Protected Setup state: IE to insert into probe requests */
+struct wps_status_t {
+       int wps_enabled;
+       int ielen;              /* length of valid data in @ie */
+       uint8_t ie[255];
+};
+#endif /* WPS */
+
+/*
+ * ks_wlan_private - per-adapter driver state
+ *
+ * One instance lives in the netdev private area (netdev_priv()) and ties
+ * together the hardware interface, the SME work queues, wireless-extension
+ * configuration (reg/wpa/pmklist) and power-save bookkeeping.
+ */
+struct ks_wlan_private {
+
+       struct hw_info_t ks_wlan_hw;    /* hardware information */
+
+       struct net_device *net_dev;
+       int reg_net;    /* register_netdev */
+       struct net_device_stats nstats;
+       struct iw_statistics wstats;
+
+       struct completion confirm_wait; /* completed on SME confirm events */
+
+       /* trx device & sme */
+       struct tx_device tx_dev;
+       struct rx_device rx_dev;
+       struct sme_info sme_i;
+       u8 *rxp;
+       unsigned int rx_size;
+       struct tasklet_struct sme_task;
+       struct work_struct ks_wlan_wakeup_task;
+       int scan_ind_count;
+
+       unsigned char eth_addr[ETH_ALEN];
+
+       struct local_aplist_t aplist;   /* scan results */
+       struct local_ap_t current_ap;   /* AP we are associated with */
+       struct power_save_status_t psstatus;
+       struct sleep_status_t sleepstatus;
+       struct wpa_status_t wpa;
+       struct pmk_list_t pmklist;
+       /* wireless parameter */
+       struct ks_wlan_parameter reg;
+       uint8_t current_rate;
+
+       char nick[IW_ESSID_MAX_SIZE + 1];
+
+       spinlock_t multicast_spin;
+
+       spinlock_t dev_read_lock;
+       wait_queue_head_t devread_wait;
+
+       unsigned int need_commit;       /* for ioctl */
+
+       /* DeviceIoControl */
+       int device_open_status;
+       atomic_t event_count;
+       atomic_t rec_count;
+       int dev_count;
+#define DEVICE_STOCK_COUNT 20
+       unsigned char *dev_data[DEVICE_STOCK_COUNT];
+       int dev_size[DEVICE_STOCK_COUNT];
+
+       /* ioctl : IOCTL_FIRMWARE_VERSION */
+       unsigned char firmware_version[128 + 1];
+       int version_size;
+
+       int mac_address_valid;  /* Mac Address Status */
+
+       int dev_state;          /* DEVICE_STATE_* initialization progress */
+
+       struct sk_buff *skb;
+       unsigned int cur_rx;    /* Index into the Rx buffer of next Rx pkt. */
+       /* spinlock_t lock; */
+#define FORCE_DISCONNECT    0x80000000
+#define CONNECT_STATUS_MASK 0x7FFFFFFF
+       uint32_t connect_status;        /* connect status */
+       int infra_status;       /* Infrastructure status */
+
+       uint8_t data_buff[0x1000];
+
+       uint8_t scan_ssid_len;
+       uint8_t scan_ssid[IW_ESSID_MAX_SIZE + 1];
+       struct local_gain_t gain;
+#ifdef WPS
+       struct net_device *l2_dev;
+       int l2_fd;
+       struct wps_status_t wps;
+#endif /* WPS */
+       uint8_t sleep_mode;     /* SLP_* — most handlers bail out in SLP_SLEEP */
+
+       uint8_t region;
+       struct local_eeprom_sum_t eeprom_sum;
+       uint8_t eeprom_checksum;
+
+       struct hostt_t hostt;
+
+       unsigned long last_doze;        /* jiffies of last doze request */
+       unsigned long last_wakeup;      /* jiffies of last wakeup */
+
+       uint wakeup_count;      /* for detect wakeup loop */
+};
+
+extern int ks_wlan_net_start(struct net_device *dev);
+extern int ks_wlan_net_stop(struct net_device *dev);
+
+#endif /* _KS_WLAN_H */
diff --git a/drivers/staging/ks7010/ks_wlan_ioctl.h b/drivers/staging/ks7010/ks_wlan_ioctl.h
new file mode 100644 (file)
index 0000000..49369e4
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ *   Driver for KeyStream 11b/g wireless LAN
+ *   
+ *   Copyright (c) 2005-2008 KeyStream Corp.
+ *   Copyright (C) 2009 Renesas Technology Corp.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License version 2 as
+ *   published by the Free Software Foundation.
+ */
+
+#ifndef _KS_WLAN_IOCTL_H
+#define _KS_WLAN_IOCTL_H
+
+#include <linux/wireless.h>
+/* The low order bit identifies a SET (0) or a GET (1) ioctl.
+ * These private ioctl numbers are ABI shared with userspace tools:
+ * do not renumber them. */
+
+/*                                     SIOCIWFIRSTPRIV+0 */
+/* former KS_WLAN_GET_DRIVER_VERSION   SIOCIWFIRSTPRIV+1 */
+/*                                     SIOCIWFIRSTPRIV+2 */
+#define KS_WLAN_GET_FIRM_VERSION       SIOCIWFIRSTPRIV+3
+#ifdef WPS
+#define KS_WLAN_SET_WPS_ENABLE                 SIOCIWFIRSTPRIV+4
+#define KS_WLAN_GET_WPS_ENABLE                 SIOCIWFIRSTPRIV+5
+#define KS_WLAN_SET_WPS_PROBE_REQ      SIOCIWFIRSTPRIV+6
+#endif
+#define KS_WLAN_GET_EEPROM_CKSUM       SIOCIWFIRSTPRIV+7
+#define KS_WLAN_SET_PREAMBLE           SIOCIWFIRSTPRIV+8
+#define KS_WLAN_GET_PREAMBLE           SIOCIWFIRSTPRIV+9
+#define KS_WLAN_SET_POWER_SAVE         SIOCIWFIRSTPRIV+10
+#define KS_WLAN_GET_POWER_SAVE         SIOCIWFIRSTPRIV+11
+#define KS_WLAN_SET_SCAN_TYPE          SIOCIWFIRSTPRIV+12
+#define KS_WLAN_GET_SCAN_TYPE          SIOCIWFIRSTPRIV+13
+#define KS_WLAN_SET_RX_GAIN            SIOCIWFIRSTPRIV+14
+#define KS_WLAN_GET_RX_GAIN            SIOCIWFIRSTPRIV+15
+#define KS_WLAN_HOSTT                  SIOCIWFIRSTPRIV+16      /* unused */
+//#define KS_WLAN_SET_REGION            SIOCIWFIRSTPRIV+17
+#define KS_WLAN_SET_BEACON_LOST                SIOCIWFIRSTPRIV+18
+#define KS_WLAN_GET_BEACON_LOST                SIOCIWFIRSTPRIV+19
+
+#define KS_WLAN_SET_TX_GAIN            SIOCIWFIRSTPRIV+20
+#define KS_WLAN_GET_TX_GAIN            SIOCIWFIRSTPRIV+21
+
+/* for KS7010 */
+#define KS_WLAN_SET_PHY_TYPE           SIOCIWFIRSTPRIV+22
+#define KS_WLAN_GET_PHY_TYPE           SIOCIWFIRSTPRIV+23
+#define KS_WLAN_SET_CTS_MODE           SIOCIWFIRSTPRIV+24
+#define KS_WLAN_GET_CTS_MODE           SIOCIWFIRSTPRIV+25
+/*                                     SIOCIWFIRSTPRIV+26 */
+/*                                     SIOCIWFIRSTPRIV+27 */
+#define KS_WLAN_SET_SLEEP_MODE         SIOCIWFIRSTPRIV+28      /* sleep mode */
+#define KS_WLAN_GET_SLEEP_MODE         SIOCIWFIRSTPRIV+29      /* sleep mode */
+/*                                     SIOCIWFIRSTPRIV+30 */
+/*                                     SIOCIWFIRSTPRIV+31 */
+
+#ifdef __KERNEL__
+
+#include "ks_wlan.h"
+#include <linux/netdevice.h>
+
+extern int ks_wlan_read_config_file(struct ks_wlan_private *priv);
+extern int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
+                                  unsigned int commit_flag);
+
+#endif /* __KERNEL__ */
+
+#endif /* _KS_WLAN_IOCTL_H */
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
new file mode 100644 (file)
index 0000000..1e21eb1
--- /dev/null
@@ -0,0 +1,3528 @@
+/*
+ *   Driver for KeyStream 11b/g wireless LAN
+ *
+ *   Copyright (C) 2005-2008 KeyStream Corp.
+ *   Copyright (C) 2009 Renesas Technology Corp.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License version 2 as
+ *   published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/mii.h>
+#include <linux/pci.h>
+#include <linux/ctype.h>
+#include <linux/timer.h>
+#include <asm/atomic.h>
+#include <linux/io.h>
+#include <asm/uaccess.h>
+
+static int wep_on_off;
+#define        WEP_OFF         0
+#define        WEP_ON_64BIT    1
+#define        WEP_ON_128BIT   2
+
+#include "ks_wlan.h"
+#include "ks_hostif.h"
+#include "ks_wlan_ioctl.h"
+
+/* Include Wireless Extension definition and check version */
+#include <linux/wireless.h>
+#define WIRELESS_SPY   /* enable iwspy support */
+#include <net/iw_handler.h>    /* New driver API */
+
+/* Frequency list (map channels to frequencies) */
+static const long frequency_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+       2447, 2452, 2457, 2462, 2467, 2472, 2484
+};
+
+/* A few details needed for WEP (Wireless Equivalent Privacy) */
+#define MAX_KEY_SIZE 13        /* 128 (?) bits */
+#define MIN_KEY_SIZE  5        /* 40 bits RC4 - WEP */
+typedef struct wep_key_t {
+       u16 len;
+       u8 key[16];     /* 40-bit and 104-bit keys */
+} wep_key_t;
+
+/* Backward compatibility */
+#ifndef IW_ENCODE_NOKEY
+#define IW_ENCODE_NOKEY 0x0800 /* Key is write only, so not present */
+#define IW_ENCODE_MODE  (IW_ENCODE_DISABLED | IW_ENCODE_RESTRICTED | IW_ENCODE_OPEN)
+#endif /* IW_ENCODE_NOKEY */
+
+/* List of Wireless Handlers (new API) */
+static const struct iw_handler_def ks_wlan_handler_def;
+
+#define KSC_OPNOTSUPP  /* Operation Not Support */
+
+/*
+ *     function prototypes
+ */
+extern int ks_wlan_hw_tx(struct ks_wlan_private *priv, void *p,
+                        unsigned long size,
+                        void (*complete_handler) (void *arg1, void *arg2),
+                        void *arg1, void *arg2);
+static int ks_wlan_open(struct net_device *dev);
+static void ks_wlan_tx_timeout(struct net_device *dev);
+static int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static int ks_wlan_close(struct net_device *dev);
+static void ks_wlan_set_multicast_list(struct net_device *dev);
+static struct net_device_stats *ks_wlan_get_stats(struct net_device *dev);
+static int ks_wlan_set_mac_address(struct net_device *dev, void *addr);
+static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
+                               int cmd);
+
+/* Rate-limit flag + timer: at most one PHY info request per second */
+static atomic_t update_phyinfo;
+static struct timer_list update_phyinfo_timer;
+/*
+ * ks_wlan_update_phy_information() - request fresh PHY statistics from
+ * the firmware via the SME queue and wait (up to HZ/2) for the confirm.
+ *
+ * Returns 0 on success, 1 if a request was issued within the last
+ * second (rate-limited), -1 if the device is not yet initialized.
+ */
+static
+int ks_wlan_update_phy_information(struct ks_wlan_private *priv)
+{
+       struct iw_statistics *wstats = &priv->wstats;
+
+       DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
+
+       if (priv->dev_state < DEVICE_STATE_READY) {
+               return -1;      /* not finished initialize */
+       }
+       if (atomic_read(&update_phyinfo))
+               return 1;       /* still inside the 1s hold-off window */
+
+       /* The status */
+       wstats->status = priv->reg.operation_mode;      /* Operation mode */
+
+       /* Signal quality and co. But where is the noise level ??? */
+       hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST);
+
+       /* interruptible_sleep_on_timeout(&priv->confirm_wait, HZ/2); */
+       if (!wait_for_completion_interruptible_timeout
+           (&priv->confirm_wait, HZ / 2)) {
+               DPRINTK(1, "wait time out!!\n");
+       }
+
+       /* arm the hold-off: cleared by ks_wlan_update_phyinfo_timeout() */
+       atomic_inc(&update_phyinfo);
+       update_phyinfo_timer.expires = jiffies + HZ;    /* 1sec */
+       add_timer(&update_phyinfo_timer);
+
+       return 0;
+}
+
+/* Timer callback: lift the 1s hold-off so PHY info may be requested again */
+static
+void ks_wlan_update_phyinfo_timeout(unsigned long ptr)
+{
+       DPRINTK(4, "in_interrupt = %ld\n", in_interrupt());
+       atomic_set(&update_phyinfo, 0);
+}
+
+/*
+ * ks_wlan_setup_parameter() - push changed wireless parameters to firmware.
+ * @priv:        adapter state
+ * @commit_flag: bitmask of SME_* flags selecting which settings to send
+ *
+ * Enqueues a stop request, then one SME request per flagged setting,
+ * then mode-set and start requests; the SME task processes the queue
+ * asynchronously.  Always returns 0.
+ */
+int ks_wlan_setup_parameter(struct ks_wlan_private *priv,
+                           unsigned int commit_flag)
+{
+       DPRINTK(2, "\n");
+
+       hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+
+       if (commit_flag & SME_RTS)
+               hostif_sme_enqueue(priv, SME_RTS_THRESHOLD_REQUEST);
+       if (commit_flag & SME_FRAG)
+               hostif_sme_enqueue(priv, SME_FRAGMENTATION_THRESHOLD_REQUEST);
+
+       if (commit_flag & SME_WEP_INDEX)
+               hostif_sme_enqueue(priv, SME_WEP_INDEX_REQUEST);
+       if (commit_flag & SME_WEP_VAL1)
+               hostif_sme_enqueue(priv, SME_WEP_KEY1_REQUEST);
+       if (commit_flag & SME_WEP_VAL2)
+               hostif_sme_enqueue(priv, SME_WEP_KEY2_REQUEST);
+       if (commit_flag & SME_WEP_VAL3)
+               hostif_sme_enqueue(priv, SME_WEP_KEY3_REQUEST);
+       if (commit_flag & SME_WEP_VAL4)
+               hostif_sme_enqueue(priv, SME_WEP_KEY4_REQUEST);
+       if (commit_flag & SME_WEP_FLAG)
+               hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
+
+       if (commit_flag & SME_RSN) {
+               hostif_sme_enqueue(priv, SME_RSN_ENABLED_REQUEST);
+               hostif_sme_enqueue(priv, SME_RSN_MODE_REQUEST);
+       }
+       if (commit_flag & SME_RSN_MULTICAST)
+               hostif_sme_enqueue(priv, SME_RSN_MCAST_REQUEST);
+       if (commit_flag & SME_RSN_UNICAST)
+               hostif_sme_enqueue(priv, SME_RSN_UCAST_REQUEST);
+       if (commit_flag & SME_RSN_AUTH)
+               hostif_sme_enqueue(priv, SME_RSN_AUTH_REQUEST);
+
+       /* unconditionally re-apply mode and restart the interface */
+       hostif_sme_enqueue(priv, SME_MODE_SET_REQUEST);
+
+       hostif_sme_enqueue(priv, SME_START_REQUEST);
+
+       return 0;
+}
+
+/*
+ * Initial Wireless Extension code for Ks_Wlannet driver by :
+ *     Jean Tourrilhes <jt@hpl.hp.com> - HPL - 17 November 00
+ * Conversion to new driver API by :
+ *     Jean Tourrilhes <jt@hpl.hp.com> - HPL - 26 March 02
+ * Javier also did a good amount of work here, adding some new extensions
+ * and fixing my code. Let's just say that without him this code just
+ * would not work at all... - Jean II
+ */
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get protocol name */
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get protocol name (SIOCGIWNAME)
+ * Reports the PHY mode as a protocol string; returns -EPERM while
+ * the device is in sleep mode. */
+static int ks_wlan_get_name(struct net_device *dev,
+                           struct iw_request_info *info, char *cwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (priv->dev_state < DEVICE_STATE_READY) {
+               strcpy(cwrq, "NOT READY!");
+       } else if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+               strcpy(cwrq, "IEEE 802.11b");
+       } else if (priv->reg.phy_type == D_11G_ONLY_MODE) {
+               strcpy(cwrq, "IEEE 802.11g");
+       } else {
+               strcpy(cwrq, "IEEE 802.11b/g");
+       }
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set frequency */
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set frequency (SIOCSIWFREQ)
+ * Accepts either a channel number (m <= 1000, e == 0) or an absolute
+ * frequency (e == 1, value = m * 10 Hz) which is mapped back to a
+ * 2.4 GHz channel via frequency_list[].  The new channel is only
+ * recorded in priv->reg; -EINPROGRESS asks the caller to run the
+ * commit handler. */
+static int ks_wlan_set_freq(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_freq *fwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int rc = -EINPROGRESS;  /* Call commit handler */
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+
+       /* for SLEEP MODE */
+       /* If setting by frequency, convert to a channel */
+       if ((fwrq->e == 1) &&
+           (fwrq->m >= (int)2.412e8) && (fwrq->m <= (int)2.487e8)) {
+               int f = fwrq->m / 100000;       /* -> MHz, matches frequency_list[] */
+               int c = 0;
+               while ((c < 14) && (f != frequency_list[c]))
+                       c++;
+               /* Hack to fall through... */
+               fwrq->e = 0;
+               fwrq->m = c + 1;        /* c == 14 (not found) fails the range check below */
+       }
+       /* Setting by channel number */
+       if ((fwrq->m > 1000) || (fwrq->e > 0))
+               rc = -EOPNOTSUPP;
+       else {
+               int channel = fwrq->m;
+               /* We should do a better check than that,
+                * based on the card capability !!! */
+               if ((channel < 1) || (channel > 14)) {
+                       printk(KERN_DEBUG
+                              "%s: New channel value of %d is invalid!\n",
+                              dev->name, fwrq->m);
+                       rc = -EINVAL;
+               } else {
+                       /* Yes ! We can set it !!! */
+                       priv->reg.channel = (u8) (channel);
+                       priv->need_commit |= SME_MODE_SET;
+               }
+       }
+
+       return rc;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get frequency */
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get frequency (SIOCGIWFREQ)
+ * Reports the current AP's channel when associated, otherwise the
+ * configured channel, encoded as m * 10 Hz (e == 1). */
+static int ks_wlan_get_freq(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_freq *fwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int f;
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+               f = (int)priv->current_ap.channel;
+       } else
+               f = (int)priv->reg.channel;
+       fwrq->m = frequency_list[f - 1] * 100000;
+       fwrq->e = 1;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set ESSID */
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set ESSID (SIOCSIWESSID)
+ * flags == 0 selects "any" (empty SSID); otherwise the SSID from
+ * @extra is stored (trailing NUL stripped, max IW_ESSID_MAX_SIZE).
+ * Unlike most handlers, this one commits immediately instead of
+ * returning -EINPROGRESS. */
+static int ks_wlan_set_essid(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       size_t len;
+
+       DPRINTK(2, " %d\n", dwrq->flags);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+
+       /* for SLEEP MODE */
+       /* Check if we asked for `any' */
+       if (dwrq->flags == 0) {
+               /* Just send an empty SSID list */
+               memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
+               priv->reg.ssid.size = 0;
+       } else {
+#if 1
+               len = dwrq->length;
+               /* iwconfig uses nul termination in SSID.. */
+               if (len > 0 && extra[len - 1] == '\0')
+                       len--;
+
+               /* Check the size of the string */
+               if (len > IW_ESSID_MAX_SIZE) {
+                       return -EINVAL;
+               }
+#else
+               /* Check the size of the string */
+               if (dwrq->length > IW_ESSID_MAX_SIZE + 1) {
+                       return -E2BIG;
+               }
+#endif
+
+               /* Set the SSID */
+               memset(priv->reg.ssid.body, 0, sizeof(priv->reg.ssid.body));
+
+#if 1
+               memcpy(priv->reg.ssid.body, extra, len);
+               priv->reg.ssid.size = len;
+#else
+               memcpy(priv->reg.ssid.body, extra, dwrq->length);
+               priv->reg.ssid.size = dwrq->length;
+#endif
+       }
+       /* Write it to the card */
+       priv->need_commit |= SME_MODE_SET;
+
+//      return  -EINPROGRESS;   /* Call commit handler */
+       ks_wlan_setup_parameter(priv, priv->need_commit);
+       priv->need_commit = 0;
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get ESSID */
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get ESSID (SIOCGIWESSID)
+ * Copies the configured SSID into @extra; flags = 1 (active) when an
+ * SSID is set, 0 (ANY) otherwise. */
+static int ks_wlan_get_essid(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+
+       /* for SLEEP MODE */
+       /* Note : if dwrq->flags != 0, we should
+        * get the relevant SSID from the SSID list... */
+       if (priv->reg.ssid.size) {
+               /* Get the current SSID */
+               memcpy(extra, priv->reg.ssid.body, priv->reg.ssid.size);
+#if 0
+               extra[priv->reg.ssid.size] = '\0';
+#endif
+               /* If none, we may want to get the one that was set */
+
+               /* Push it out ! */
+#if 1
+               dwrq->length = priv->reg.ssid.size;
+#else
+               dwrq->length = priv->reg.ssid.size + 1;
+#endif
+               dwrq->flags = 1;        /* active */
+       } else {
+#if 1
+               dwrq->length = 0;
+#else
+               extra[0] = '\0';
+               dwrq->length = 1;
+#endif
+               dwrq->flags = 0;        /* ANY */
+       }
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set AP address */
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set AP address (SIOCSIWAP)
+ * Records the requested BSSID in priv->reg.bssid (ad-hoc and
+ * infrastructure modes only); a valid unicast address schedules a
+ * mode-set commit (-EINPROGRESS). */
+static int ks_wlan_set_wap(struct net_device *dev, struct iw_request_info *info,
+                          struct sockaddr *ap_addr, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       DPRINTK(2, "\n");
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (priv->reg.operation_mode == MODE_ADHOC ||
+           priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+               memcpy(priv->reg.bssid, (u8 *) & ap_addr->sa_data, ETH_ALEN);
+
+               if (is_valid_ether_addr((u8 *) priv->reg.bssid)) {
+                       priv->need_commit |= SME_MODE_SET;
+               }
+       } else {
+               memset(priv->reg.bssid, 0x0, ETH_ALEN);
+               return -EOPNOTSUPP;
+       }
+
+       DPRINTK(2, "bssid = %02x:%02x:%02x:%02x:%02x:%02x\n",
+               priv->reg.bssid[0], priv->reg.bssid[1], priv->reg.bssid[2],
+               priv->reg.bssid[3], priv->reg.bssid[4], priv->reg.bssid[5]);
+
+       /* Write it to the card */
+       if (priv->need_commit) {
+               priv->need_commit |= SME_MODE_SET;
+               return -EINPROGRESS;    /* Call commit handler */
+       }
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get AP address */
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get AP address (SIOCGIWAP)
+ * Returns the associated AP's BSSID, or all-zeros when not connected. */
+static int ks_wlan_get_wap(struct net_device *dev, struct iw_request_info *info,
+                          struct sockaddr *awrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+               memcpy(awrq->sa_data, &(priv->current_ap.bssid[0]), ETH_ALEN);
+       } else {
+               memset(awrq->sa_data, 0, ETH_ALEN);
+       }
+
+       awrq->sa_family = ARPHRD_ETHER;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Nickname */
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Nickname (SIOCSIWNICKN)
+ * Stores up to 16 characters (plus NUL) in priv->nick;
+ * longer input is rejected with -E2BIG. */
+static int ks_wlan_set_nick(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_point *dwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+
+       /* for SLEEP MODE */
+       /* Check the size of the string */
+       if (dwrq->length > 16 + 1) {
+               return -E2BIG;
+       }
+       memset(priv->nick, 0, sizeof(priv->nick));
+       memcpy(priv->nick, extra, dwrq->length);
+
+       return -EINPROGRESS;    /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Nickname */
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Nickname (SIOCGIWNICKN)
+ * Copies at most 16 characters of priv->nick into @extra and
+ * guarantees NUL termination. */
+static int ks_wlan_get_nick(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_point *dwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       strncpy(extra, priv->nick, 16);
+       extra[16] = '\0';       /* strncpy may leave extra unterminated */
+       dwrq->length = strlen(extra) + 1;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Bit-Rate */
+static int ks_wlan_set_rate(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_param *vwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int i = 0;
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+               if (vwrq->fixed == 1) {
+                       switch (vwrq->value) {
+                       case 11000000:
+                       case 5500000:
+                               priv->reg.rate_set.body[0] =
+                                   (uint8_t) (vwrq->value / 500000);
+                               break;
+                       case 2000000:
+                       case 1000000:
+                               priv->reg.rate_set.body[0] =
+                                   ((uint8_t) (vwrq->value / 500000)) |
+                                   BASIC_RATE;
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+                       priv->reg.tx_rate = TX_RATE_FIXED;
+                       priv->reg.rate_set.size = 1;
+               } else {        /* vwrq->fixed == 0 */
+                       if (vwrq->value > 0) {
+                               switch (vwrq->value) {
+                               case 11000000:
+                                       priv->reg.rate_set.body[3] =
+                                           TX_RATE_11M;
+                                       i++;
+                               case 5500000:
+                                       priv->reg.rate_set.body[2] = TX_RATE_5M;
+                                       i++;
+                               case 2000000:
+                                       priv->reg.rate_set.body[1] =
+                                           TX_RATE_2M | BASIC_RATE;
+                                       i++;
+                               case 1000000:
+                                       priv->reg.rate_set.body[0] =
+                                           TX_RATE_1M | BASIC_RATE;
+                                       i++;
+                                       break;
+                               default:
+                                       return -EINVAL;
+                               }
+                               priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
+                               priv->reg.rate_set.size = i;
+                       } else {
+                               priv->reg.rate_set.body[3] = TX_RATE_11M;
+                               priv->reg.rate_set.body[2] = TX_RATE_5M;
+                               priv->reg.rate_set.body[1] =
+                                   TX_RATE_2M | BASIC_RATE;
+                               priv->reg.rate_set.body[0] =
+                                   TX_RATE_1M | BASIC_RATE;
+                               priv->reg.tx_rate = TX_RATE_FULL_AUTO;
+                               priv->reg.rate_set.size = 4;
+                       }
+               }
+       } else {        /* D_11B_ONLY_MODE or  D_11BG_COMPATIBLE_MODE */
+               if (vwrq->fixed == 1) {
+                       switch (vwrq->value) {
+                       case 54000000:
+                       case 48000000:
+                       case 36000000:
+                       case 18000000:
+                       case 9000000:
+                               priv->reg.rate_set.body[0] =
+                                   (uint8_t) (vwrq->value / 500000);
+                               break;
+                       case 24000000:
+                       case 12000000:
+                       case 11000000:
+                       case 6000000:
+                       case 5500000:
+                       case 2000000:
+                       case 1000000:
+                               priv->reg.rate_set.body[0] =
+                                   ((uint8_t) (vwrq->value / 500000)) |
+                                   BASIC_RATE;
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+                       priv->reg.tx_rate = TX_RATE_FIXED;
+                       priv->reg.rate_set.size = 1;
+               } else {        /* vwrq->fixed == 0 */
+                       if (vwrq->value > 0) {
+                               switch (vwrq->value) {
+                               case 54000000:
+                                       priv->reg.rate_set.body[11] =
+                                           TX_RATE_54M;
+                                       i++;
+                               case 48000000:
+                                       priv->reg.rate_set.body[10] =
+                                           TX_RATE_48M;
+                                       i++;
+                               case 36000000:
+                                       priv->reg.rate_set.body[9] =
+                                           TX_RATE_36M;
+                                       i++;
+                               case 24000000:
+                               case 18000000:
+                               case 12000000:
+                               case 11000000:
+                               case 9000000:
+                               case 6000000:
+                                       if (vwrq->value == 24000000) {
+                                               priv->reg.rate_set.body[8] =
+                                                   TX_RATE_18M;
+                                               i++;
+                                               priv->reg.rate_set.body[7] =
+                                                   TX_RATE_9M;
+                                               i++;
+                                               priv->reg.rate_set.body[6] =
+                                                   TX_RATE_24M | BASIC_RATE;
+                                               i++;
+                                               priv->reg.rate_set.body[5] =
+                                                   TX_RATE_12M | BASIC_RATE;
+                                               i++;
+                                               priv->reg.rate_set.body[4] =
+                                                   TX_RATE_6M | BASIC_RATE;
+                                               i++;
+                                               priv->reg.rate_set.body[3] =
+                                                   TX_RATE_11M | BASIC_RATE;
+                                               i++;
+                                       } else if (vwrq->value == 18000000) {
+                                               priv->reg.rate_set.body[7] =
+                                                   TX_RATE_18M;
+                                               i++;
+                                               priv->reg.rate_set.body[6] =
+                                                   TX_RATE_9M;
+                                               i++;
+                                               priv->reg.rate_set.body[5] =
+                                                   TX_RATE_12M | BASIC_RATE;
+                                               i++;
+                                               priv->reg.rate_set.body[4] =
+                                                   TX_RATE_6M | BASIC_RATE;
+                                               i++;
+                                               priv->reg.rate_set.body[3] =
+                                                   TX_RATE_11M | BASIC_RATE;
+                                               i++;
+                                       } else if (vwrq->value == 12000000) {
+                                               priv->reg.rate_set.body[6] =
+                                                   TX_RATE_9M;
+                                               i++;
+                                               priv->reg.rate_set.body[5] =
+                                                   TX_RATE_12M | BASIC_RATE;
+                                               i++;
+                                               priv->reg.rate_set.body[4] =
+                                                   TX_RATE_6M | BASIC_RATE;
+                                               i++;
+                                               priv->reg.rate_set.body[3] =
+                                                   TX_RATE_11M | BASIC_RATE;
+                                               i++;
+                                       } else if (vwrq->value == 11000000) {
+                                               priv->reg.rate_set.body[5] =
+                                                   TX_RATE_9M;
+                                               i++;
+                                               priv->reg.rate_set.body[4] =
+                                                   TX_RATE_6M | BASIC_RATE;
+                                               i++;
+                                               priv->reg.rate_set.body[3] =
+                                                   TX_RATE_11M | BASIC_RATE;
+                                               i++;
+                                       } else if (vwrq->value == 9000000) {
+                                               priv->reg.rate_set.body[4] =
+                                                   TX_RATE_9M;
+                                               i++;
+                                               priv->reg.rate_set.body[3] =
+                                                   TX_RATE_6M | BASIC_RATE;
+                                               i++;
+                                       } else {        /* vwrq->value == 6000000 */
+                                               priv->reg.rate_set.body[3] =
+                                                   TX_RATE_6M | BASIC_RATE;
+                                               i++;
+                                       }
+                               case 5500000:
+                                       priv->reg.rate_set.body[2] =
+                                           TX_RATE_5M | BASIC_RATE;
+                                       i++;
+                               case 2000000:
+                                       priv->reg.rate_set.body[1] =
+                                           TX_RATE_2M | BASIC_RATE;
+                                       i++;
+                               case 1000000:
+                                       priv->reg.rate_set.body[0] =
+                                           TX_RATE_1M | BASIC_RATE;
+                                       i++;
+                                       break;
+                               default:
+                                       return -EINVAL;
+                               }
+                               priv->reg.tx_rate = TX_RATE_MANUAL_AUTO;
+                               priv->reg.rate_set.size = i;
+                       } else {
+                               priv->reg.rate_set.body[11] = TX_RATE_54M;
+                               priv->reg.rate_set.body[10] = TX_RATE_48M;
+                               priv->reg.rate_set.body[9] = TX_RATE_36M;
+                               priv->reg.rate_set.body[8] = TX_RATE_18M;
+                               priv->reg.rate_set.body[7] = TX_RATE_9M;
+                               priv->reg.rate_set.body[6] =
+                                   TX_RATE_24M | BASIC_RATE;
+                               priv->reg.rate_set.body[5] =
+                                   TX_RATE_12M | BASIC_RATE;
+                               priv->reg.rate_set.body[4] =
+                                   TX_RATE_6M | BASIC_RATE;
+                               priv->reg.rate_set.body[3] =
+                                   TX_RATE_11M | BASIC_RATE;
+                               priv->reg.rate_set.body[2] =
+                                   TX_RATE_5M | BASIC_RATE;
+                               priv->reg.rate_set.body[1] =
+                                   TX_RATE_2M | BASIC_RATE;
+                               priv->reg.rate_set.body[0] =
+                                   TX_RATE_1M | BASIC_RATE;
+                               priv->reg.tx_rate = TX_RATE_FULL_AUTO;
+                               priv->reg.rate_set.size = 12;
+                       }
+               }
+       }
+
+       priv->need_commit |= SME_MODE_SET;
+
+       return -EINPROGRESS;    /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Bit-Rate */
+static int ks_wlan_get_rate(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_param *vwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       DPRINTK(2, "in_interrupt = %ld update_phyinfo = %d\n",
+               in_interrupt(), atomic_read(&update_phyinfo));
+
+       /* Reject the request while the device is asleep. */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       /* Refresh the cached PHY information when it has expired. */
+       if (!atomic_read(&update_phyinfo))
+               ks_wlan_update_phy_information(priv);
+
+       /* current_rate is in 500 kbit/s units; convert to bit/s. */
+       vwrq->value = ((priv->current_rate) & RATE_MASK) * 500000;
+       vwrq->fixed = (priv->reg.tx_rate == TX_RATE_FIXED) ? 1 : 0;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set RTS threshold */
+static int ks_wlan_set_rts(struct net_device *dev, struct iw_request_info *info,
+                          struct iw_param *vwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int threshold = vwrq->value;
+
+       /* Reject the request while the device is asleep. */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       /* "disabled" selects the maximum legal value (RTS off). */
+       if (vwrq->disabled)
+               threshold = 2347;
+       if (threshold < 0 || threshold > 2347)
+               return -EINVAL;
+
+       priv->reg.rts = threshold;
+       priv->need_commit |= SME_RTS;
+
+       return -EINPROGRESS;    /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get RTS threshold */
+static int ks_wlan_get_rts(struct net_device *dev, struct iw_request_info *info,
+                          struct iw_param *vwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       /* Reject the request while the device is asleep. */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       /* Report the configured threshold; >= 2347 means "disabled". */
+       vwrq->value = priv->reg.rts;
+       vwrq->disabled = (vwrq->value >= 2347);
+       vwrq->fixed = 1;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Fragmentation threshold */
+static int ks_wlan_set_frag(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_param *vwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int threshold = vwrq->value;
+
+       /* Reject the request while the device is asleep. */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       /* "disabled" selects the maximum legal value (fragmentation off). */
+       if (vwrq->disabled)
+               threshold = 2346;
+       if (threshold < 256 || threshold > 2346)
+               return -EINVAL;
+
+       threshold &= ~0x1;      /* Get an even value - is it really needed ??? */
+       priv->reg.fragment = threshold;
+       priv->need_commit |= SME_FRAG;
+
+       return -EINPROGRESS;    /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Fragmentation threshold */
+static int ks_wlan_get_frag(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_param *vwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       /* Reject the request while the device is asleep. */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       /* Report the configured threshold; >= 2346 means "disabled". */
+       vwrq->value = priv->reg.fragment;
+       vwrq->disabled = (vwrq->value >= 2346);
+       vwrq->fixed = 1;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Mode of Operation */
+static int ks_wlan_set_mode(struct net_device *dev,
+                           struct iw_request_info *info, __u32 * uwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       DPRINTK(2, "mode=%d\n", *uwrq);
+
+       /* Reject the request while the device is asleep. */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       /* Only ad-hoc and infrastructure are supported; every other
+        * wext mode (auto, master, repeater, secondary, monitor) is
+        * refused. */
+       if (*uwrq == IW_MODE_ADHOC)
+               priv->reg.operation_mode = MODE_ADHOC;
+       else if (*uwrq == IW_MODE_INFRA)
+               priv->reg.operation_mode = MODE_INFRASTRUCTURE;
+       else
+               return -EINVAL;
+
+       priv->need_commit |= SME_MODE_SET;
+
+       return -EINPROGRESS;    /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Mode of Operation */
+static int ks_wlan_get_mode(struct net_device *dev,
+                           struct iw_request_info *info, __u32 * uwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       /* Reject the request while the device is asleep. */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       /* If not managed, assume it's ad-hoc */
+       *uwrq = (priv->reg.operation_mode == MODE_INFRASTRUCTURE) ?
+           IW_MODE_INFRA : IW_MODE_ADHOC;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Encryption Key */
+/* Installs, selects or disables a WEP key and updates the
+ * authentication type; commits the changes immediately via
+ * ks_wlan_setup_parameter(). */
+static int ks_wlan_set_encode(struct net_device *dev,
+                             struct iw_request_info *info,
+                             struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       wep_key_t key;
+       /* 1..4 selects an explicit key slot; 0 means "current index". */
+       int index = (dwrq->flags & IW_ENCODE_INDEX);
+       int current_index = priv->reg.wep_index;
+       int i;
+
+       DPRINTK(2, "flags=%04X\n", dwrq->flags);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+
+       /* for SLEEP MODE */
+       /* index check */
+       if ((index < 0) || (index > 4))
+               return -EINVAL;
+       else if (index == 0)
+               index = current_index;  /* keep the active transmit slot */
+       else
+               index--;        /* convert 1-based user index to 0-based */
+
+       /* Is WEP supported ? */
+       /* Basic checking: do we have a key to set ? */
+       if (dwrq->length > 0) {
+               if (dwrq->length > MAX_KEY_SIZE) {      /* Check the size of the key */
+                       return -EINVAL;
+               }
+               if (dwrq->length > MIN_KEY_SIZE) {      /* Set the length */
+                       /* Longer than 40 bits: treat as a 104-bit (WEP128) key. */
+                       key.len = MAX_KEY_SIZE;
+                       priv->reg.privacy_invoked = 0x01;
+                       priv->need_commit |= SME_WEP_FLAG;
+                       wep_on_off = WEP_ON_128BIT;
+               } else {
+                       if (dwrq->length > 0) {
+                               /* 40-bit (WEP64) key. */
+                               key.len = MIN_KEY_SIZE;
+                               priv->reg.privacy_invoked = 0x01;
+                               priv->need_commit |= SME_WEP_FLAG;
+                               wep_on_off = WEP_ON_64BIT;
+                       } else {        /* Disable the key */
+                               key.len = 0;
+                       }
+               }
+               /* Check if the key is not marked as invalid */
+               if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
+                       /* Cleanup */
+                       memset(key.key, 0, MAX_KEY_SIZE);
+                       /* Copy the key in the driver */
+                       /* NOTE(review): reads the key through dwrq->pointer
+                        * with copy_from_user; wext normally hands the key
+                        * data in "extra" - confirm dwrq->pointer is still a
+                        * user-space address on this call path. */
+                       if (copy_from_user
+                           (key.key, dwrq->pointer, dwrq->length)) {
+                               key.len = 0;
+                               return -EFAULT;
+                       }
+                       /* Send the key to the card */
+                       priv->reg.wep_key[index].size = key.len;
+                       for (i = 0; i < (priv->reg.wep_key[index].size); i++) {
+                               priv->reg.wep_key[index].val[i] = key.key[i];
+                       }
+                       priv->need_commit |= (SME_WEP_VAL1 << index);
+                       priv->reg.wep_index = index;
+                       priv->need_commit |= SME_WEP_INDEX;
+               }
+       } else {
+               if (dwrq->flags & IW_ENCODE_DISABLED) {
+                       /* Drop all four key slots and turn privacy off. */
+                       priv->reg.wep_key[0].size = 0;
+                       priv->reg.wep_key[1].size = 0;
+                       priv->reg.wep_key[2].size = 0;
+                       priv->reg.wep_key[3].size = 0;
+                       priv->reg.privacy_invoked = 0x00;
+                       if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+                               priv->need_commit |= SME_MODE_SET;
+                       }
+                       priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
+                       wep_on_off = WEP_OFF;
+                       priv->need_commit |= SME_WEP_FLAG;
+               } else {
+                       /* Do we want to just set the transmit key index ? */
+                       if ((index >= 0) && (index < 4)) {
+                               /* set_wep_key(priv, index, 0, 0, 1);   xxx */
+                               /* Only switch to a slot that holds a key. */
+                               if (priv->reg.wep_key[index].size) {
+                                       priv->reg.wep_index = index;
+                                       priv->need_commit |= SME_WEP_INDEX;
+                               } else
+                                       return -EINVAL;
+                       }
+               }
+       }
+
+       /* Commit the changes if needed */
+       if (dwrq->flags & IW_ENCODE_MODE)
+               priv->need_commit |= SME_WEP_FLAG;
+
+       /* Switching the authentication type forces a mode re-set on commit. */
+       if (dwrq->flags & IW_ENCODE_OPEN) {
+               if (priv->reg.authenticate_type == AUTH_TYPE_SHARED_KEY) {
+                       priv->need_commit |= SME_MODE_SET;
+               }
+               priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
+       } else if (dwrq->flags & IW_ENCODE_RESTRICTED) {
+               if (priv->reg.authenticate_type == AUTH_TYPE_OPEN_SYSTEM) {
+                       priv->need_commit |= SME_MODE_SET;
+               }
+               priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
+       }
+//      return -EINPROGRESS;            /* Call commit handler */
+       /* Unlike the other handlers, commit immediately instead of
+        * returning -EINPROGRESS. */
+       if (priv->need_commit) {
+               ks_wlan_setup_parameter(priv, priv->need_commit);
+               priv->need_commit = 0;
+       }
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Encryption Key */
+/* Reports the current encryption state and copies the selected WEP
+ * key into the user buffer. */
+static int ks_wlan_get_encode(struct net_device *dev,
+                             struct iw_request_info *info,
+                             struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       char zeros[16];
+       /* 1-based user index converted to 0-based; 0 becomes -1,
+        * meaning "current transmit key". */
+       int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       dwrq->flags = IW_ENCODE_DISABLED;
+
+       /* Check encryption mode */
+       switch (priv->reg.authenticate_type) {
+       case AUTH_TYPE_OPEN_SYSTEM:
+               dwrq->flags = IW_ENCODE_OPEN;
+               break;
+       case AUTH_TYPE_SHARED_KEY:
+               dwrq->flags = IW_ENCODE_RESTRICTED;
+               break;
+       }
+
+       memset(zeros, 0, sizeof(zeros));
+
+       /* Which key do we want ? -1 -> tx index */
+       if ((index < 0) || (index >= 4))
+               index = priv->reg.wep_index;
+       if (priv->reg.privacy_invoked) {
+               dwrq->flags &= ~IW_ENCODE_DISABLED;
+               /* dwrq->flags |= IW_ENCODE_NOKEY; */
+       }
+       dwrq->flags |= index + 1;       /* report the slot as 1-based */
+       DPRINTK(2, "encoding flag = 0x%04X\n", dwrq->flags);
+       /* Copy the key to the user buffer */
+       if ((index >= 0) && (index < 4))
+               dwrq->length = priv->reg.wep_key[index].size;
+       if (dwrq->length > 16) {
+               dwrq->length = 0;
+       }
+#if 1  /* IW_ENCODE_NOKEY; */
+       if (dwrq->length) {
+               if ((index >= 0) && (index < 4))
+                       memcpy(extra, priv->reg.wep_key[index].val,
+                              dwrq->length);
+       } else
+               /* length is 0 here, so this memcpy copies nothing. */
+               memcpy(extra, zeros, dwrq->length);
+#endif
+       return 0;
+}
+
+#ifndef KSC_OPNOTSUPP
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Tx-Power */
+/* Transmit power control is not implemented by this driver. */
+static int ks_wlan_set_txpow(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_param *vwrq, char *extra)
+{
+       return -EOPNOTSUPP;     /* Not Support */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Tx-Power */
+/* Tx-power readback is not supported; report a fixed, disabled value.
+ * Fix: the original body dereferenced "priv" without declaring it,
+ * which cannot compile - fetch it from netdev_priv() as every other
+ * handler in this file does. */
+static int ks_wlan_get_txpow(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_param *vwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+
+       /* for SLEEP MODE */
+       /* Not Support */
+       vwrq->value = 0;
+       vwrq->disabled = (vwrq->value == 0);
+       vwrq->fixed = 1;
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Retry limits */
+/* Retry-limit configuration is not implemented by this driver. */
+static int ks_wlan_set_retry(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_param *vwrq, char *extra)
+{
+       return -EOPNOTSUPP;     /* Not Support */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Retry limits */
+/* Retry-limit readback is not supported; report a fixed, disabled
+ * value.  Fix: the original body dereferenced "priv" without
+ * declaring it, which cannot compile - fetch it from netdev_priv()
+ * as every other handler in this file does. */
+static int ks_wlan_get_retry(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_param *vwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+
+       /* for SLEEP MODE */
+       /* Not Support */
+       vwrq->value = 0;
+       vwrq->disabled = (vwrq->value == 0);
+       vwrq->fixed = 1;
+       return 0;
+}
+#endif /* KSC_OPNOTSUPP */
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get range info */
+/* Fills the iw_range structure with the channel list, supported
+ * bitrates, threshold limits and capability flags of this device. */
+static int ks_wlan_get_range(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct iw_range *range = (struct iw_range *)extra;
+       int i, k;
+
+       DPRINTK(2, "\n");
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       dwrq->length = sizeof(struct iw_range);
+       memset(range, 0, sizeof(*range));
+       range->min_nwid = 0x0000;
+       range->max_nwid = 0x0000;
+       range->num_channels = 14;
+       /* Should be based on cap_rid.country to give only
+        * what the current card support */
+       k = 0;
+       for (i = 0; i < 13; i++) {      /* channel 1 -- 13 */
+               range->freq[k].i = i + 1;       /* List index */
+               range->freq[k].m = frequency_list[i] * 100000;
+               range->freq[k++].e = 1; /* Values in table in MHz -> * 10^5 * 10 */
+       }
+       range->num_frequency = k;
+       /* Channel 14 is only usable in 11b / 11bg-compatible modes. */
+       if (priv->reg.phy_type == D_11B_ONLY_MODE || priv->reg.phy_type == D_11BG_COMPATIBLE_MODE) {    /* channel 14 */
+               range->freq[13].i = 14; /* List index */
+               range->freq[13].m = frequency_list[13] * 100000;
+               range->freq[13].e = 1;  /* Values in table in MHz -> * 10^5 * 10 */
+               range->num_frequency = 14;
+       }
+
+       /* Hum... Should put the right values there */
+       range->max_qual.qual = 100;
+       range->max_qual.level = 256 - 128;      /* 0 dBm? */
+       range->max_qual.noise = 256 - 128;
+       range->sensitivity = 1;
+
+       if (priv->reg.phy_type == D_11B_ONLY_MODE) {
+               range->bitrate[0] = 1e6;
+               range->bitrate[1] = 2e6;
+               range->bitrate[2] = 5.5e6;
+               range->bitrate[3] = 11e6;
+               range->num_bitrates = 4;
+       } else {        /* D_11G_ONLY_MODE or D_11BG_COMPATIBLE_MODE */
+               range->bitrate[0] = 1e6;
+               range->bitrate[1] = 2e6;
+               range->bitrate[2] = 5.5e6;
+               range->bitrate[3] = 11e6;
+
+               range->bitrate[4] = 6e6;
+               range->bitrate[5] = 9e6;
+               range->bitrate[6] = 12e6;
+               /* NOTE(review): IW_MAX_BITRATES is a compile-time
+                * constant, so only one of these branches can ever be
+                * taken for a given kernel build. */
+               if (IW_MAX_BITRATES < 9) {
+                       range->bitrate[7] = 54e6;
+                       range->num_bitrates = 8;
+               } else {
+                       range->bitrate[7] = 18e6;
+                       range->bitrate[8] = 24e6;
+                       range->bitrate[9] = 36e6;
+                       range->bitrate[10] = 48e6;
+                       range->bitrate[11] = 54e6;
+
+                       range->num_bitrates = 12;
+               }
+       }
+
+       /* Set an indication of the max TCP throughput
+        * in bit/s that we can expect using this interface.
+        * May be use for QoS stuff... Jean II */
+       /* NOTE(review): "i" is left at 13 by the channel loop above,
+        * so this condition is always true here - it looks like it was
+        * meant to test the bitrate count instead. */
+       if (i > 2)
+               range->throughput = 5000 * 1000;
+       else
+               range->throughput = 1500 * 1000;
+
+       range->min_rts = 0;
+       range->max_rts = 2347;
+       range->min_frag = 256;
+       range->max_frag = 2346;
+
+       range->encoding_size[0] = 5;    /* WEP: RC4 40 bits */
+       range->encoding_size[1] = 13;   /* WEP: RC4 ~128 bits */
+       range->num_encoding_sizes = 2;
+       range->max_encoding_tokens = 4;
+
+       /* power management not support */
+       range->pmp_flags = IW_POWER_ON;
+       range->pmt_flags = IW_POWER_ON;
+       range->pm_capa = 0;
+
+       /* Transmit Power - values are in dBm( or mW) */
+       range->txpower[0] = -256;
+       range->num_txpower = 1;
+       range->txpower_capa = IW_TXPOW_DBM;
+       /* range->txpower_capa = IW_TXPOW_MWATT; */
+
+       range->we_version_source = 21;
+       range->we_version_compiled = WIRELESS_EXT;
+
+       range->retry_capa = IW_RETRY_ON;
+       range->retry_flags = IW_RETRY_ON;
+       range->r_time_flags = IW_RETRY_ON;
+
+       /* Experimental measurements - boundary 11/5.5 Mb/s */
+       /* Note : with or without the (local->rssi), results
+        * are somewhat different. - Jean II */
+       range->avg_qual.qual = 50;
+       range->avg_qual.level = 186;    /* -70 dBm */
+       range->avg_qual.noise = 0;
+
+       /* Event capability (kernel + driver) */
+       range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
+                               IW_EVENT_CAPA_MASK(SIOCGIWAP) |
+                               IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
+       range->event_capa[1] = IW_EVENT_CAPA_K_1;
+       range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
+                               IW_EVENT_CAPA_MASK(IWEVMICHAELMICFAILURE));
+
+       /* encode extension (WPA) capability */
+       range->enc_capa = (IW_ENC_CAPA_WPA |
+                          IW_ENC_CAPA_WPA2 |
+                          IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP);
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Power Management */
+/* Enables or disables power-save mode and queues a power-management
+ * request to the SME.  Power save is only valid in infrastructure
+ * mode.
+ *
+ * Fix: the original derived a 0/1 "enabled" flag and then had a
+ * second "else if (enabled)" branch (POWMGT_SAVE2_MODE) and a final
+ * "else return -EINVAL" that could never be reached; the dead
+ * branches are removed with behavior unchanged. */
+static int ks_wlan_set_power(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_param *vwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (vwrq->disabled) {
+               priv->reg.powermgt = POWMGT_ACTIVE_MODE;
+       } else {
+               if (priv->reg.operation_mode != MODE_INFRASTRUCTURE)
+                       return -EINVAL;
+               priv->reg.powermgt = POWMGT_SAVE1_MODE;
+       }
+
+       hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Power Management */
+static int ks_wlan_get_power(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_param *vwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       /* Reject the request while the device is asleep. */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       /* Any non-zero powermgt setting counts as "enabled". */
+       vwrq->disabled = (priv->reg.powermgt > 0) ? 0 : 1;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get wirless statistics */
+static int ks_wlan_get_iwstats(struct net_device *dev,
+                              struct iw_request_info *info,
+                              struct iw_quality *vwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       /* Reject the request while the device is asleep. */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       /* Only the signal level is tracked by the driver. */
+       vwrq->qual = 0; /* not supported */
+       vwrq->noise = 0;        /* not supported */
+       vwrq->level = priv->wstats.qual.level;
+       vwrq->updated = 0;
+
+       return 0;
+}
+
+#ifndef KSC_OPNOTSUPP
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set Sensitivity */
+/* Sensitivity tuning is not implemented by this driver. */
+static int ks_wlan_set_sens(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_param *vwrq,
+                           char *extra)
+{
+       return -EOPNOTSUPP;     /* Not Support */
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get Sensitivity */
+/* Sensitivity readback is not supported; always reports a fixed,
+ * disabled value of 0. */
+static int ks_wlan_get_sens(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_param *vwrq,
+                           char *extra)
+{
+       /* Not Support */
+       vwrq->value = 0;
+       vwrq->disabled = (vwrq->value == 0);
+       vwrq->fixed = 1;
+       return 0;
+}
+#endif /* KSC_OPNOTSUPP */
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get AP List */
+/* Note : this is deprecated in favor of IWSCAN */
+/* Copies the cached AP list into the user buffer: an array of
+ * sockaddr entries (BSSIDs) followed by a matching array of
+ * iw_quality entries. */
+static int ks_wlan_get_aplist(struct net_device *dev,
+                             struct iw_request_info *info,
+                             struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct sockaddr *address = (struct sockaddr *)extra;
+       struct iw_quality qual[LOCAL_APLIST_MAX];
+
+       int i;
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       for (i = 0; i < priv->aplist.size; i++) {
+               memcpy(address[i].sa_data, &(priv->aplist.ap[i].bssid[0]),
+                      ETH_ALEN);
+               address[i].sa_family = ARPHRD_ETHER;
+               /* 256 - rssi maps the stored value onto the wext
+                * level convention. */
+               qual[i].level = 256 - priv->aplist.ap[i].rssi;
+               qual[i].qual = priv->aplist.ap[i].sq;
+               qual[i].noise = 0;      /* invalid noise value */
+               qual[i].updated = 7;
+       }
+       if (i) {
+               dwrq->flags = 1;        /* Should be define'd */
+               /* Append the quality array right after the addresses. */
+               memcpy(extra + sizeof(struct sockaddr) * i,
+                      &qual, sizeof(struct iw_quality) * i);
+       }
+       dwrq->length = i;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : Initiate Scan */
+/* Records an optional directed-scan SSID and queues a BSS scan
+ * request to the SME; the scan completes asynchronously. */
+static int ks_wlan_set_scan(struct net_device *dev,
+                           struct iw_request_info *info,
+                           union iwreq_data *wrqu, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct iw_scan_req *req = NULL;
+       DPRINTK(2, "\n");
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+
+       /* for SLEEP MODE */
+       /* specified SSID SCAN */
+       /* NOTE(review): essid_len is copied without an explicit bound
+        * check - presumably it is <= IW_ESSID_MAX_SIZE and scan_ssid
+        * is sized accordingly; confirm against the driver header. */
+       if (wrqu->data.length == sizeof(struct iw_scan_req)
+           && wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+               req = (struct iw_scan_req *)extra;
+               priv->scan_ssid_len = req->essid_len;
+               memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
+       } else {
+               priv->scan_ssid_len = 0;        /* broadcast scan */
+       }
+
+       priv->sme_i.sme_flag |= SME_AP_SCAN;
+       hostif_sme_enqueue(priv, SME_BSS_SCAN_REQUEST);
+
+       /* At this point, just return to the user. */
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/*
+ * Translate scan data returned from the card to a card independent
+ * format that the Wireless Tools will understand - Jean II
+ */
+/* Serialize one scan entry (local_ap_t) into the Wireless-Extensions
+ * event stream between current_ev and end_buf, and return the new
+ * stream position.  Emits, in order: BSSID, ESSID, mode, frequency,
+ * quality, encryption flag, supported rates, and the raw RSN/WPA IEs
+ * as hex-dumped IWEVCUSTOM strings. */
+static inline char *ks_wlan_translate_scan(struct net_device *dev,
+                                          struct iw_request_info *info,
+                                          char *current_ev, char *end_buf,
+                                          struct local_ap_t *ap)
+{
+       /* struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv; */
+       struct iw_event iwe;    /* Temporary buffer */
+       u16 capabilities;
+       char *current_val;      /* For rates */
+       int i;
+       static const char rsn_leader[] = "rsn_ie=";
+       static const char wpa_leader[] = "wpa_ie=";
+       char buf0[RSN_IE_BODY_MAX * 2 + 30];
+       char buf1[RSN_IE_BODY_MAX * 2 + 30];
+       char *pbuf;
+       /* First entry *MUST* be the AP MAC address */
+       iwe.cmd = SIOCGIWAP;
+       iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+       memcpy(iwe.u.ap_addr.sa_data, ap->bssid, ETH_ALEN);
+       current_ev =
+           iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+                                IW_EV_ADDR_LEN);
+
+       /* Other entries will be displayed in the order we give them */
+
+       /* Add the ESSID (clamped to the WE maximum of 32 bytes) */
+       iwe.u.data.length = ap->ssid.size;
+       if (iwe.u.data.length > 32)
+               iwe.u.data.length = 32;
+       iwe.cmd = SIOCGIWESSID;
+       iwe.u.data.flags = 1;
+       current_ev =
+           iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+                                &(ap->ssid.body[0]));
+
+       /* Add mode (infrastructure vs ad-hoc, from the capability field) */
+       iwe.cmd = SIOCGIWMODE;
+       capabilities = le16_to_cpu(ap->capability);
+       if (capabilities & (BSS_CAP_ESS | BSS_CAP_IBSS)) {
+               if (capabilities & BSS_CAP_ESS)
+                       iwe.u.mode = IW_MODE_INFRA;
+               else
+                       iwe.u.mode = IW_MODE_ADHOC;
+               current_ev =
+                   iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+                                        IW_EV_UINT_LEN);
+       }
+
+       /* Add frequency: channel number mapped through frequency_list
+        * (table is in MHz*10, scaled here to match e = 1). */
+       iwe.cmd = SIOCGIWFREQ;
+       iwe.u.freq.m = ap->channel;
+       iwe.u.freq.m = frequency_list[iwe.u.freq.m - 1] * 100000;
+       iwe.u.freq.e = 1;
+       current_ev =
+           iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+                                IW_EV_FREQ_LEN);
+
+       /* Add quality statistics */
+       iwe.cmd = IWEVQUAL;
+       /* NOTE(review): 256 - rssi suggests rssi is stored as a negated
+        * dBm value in a u8 — confirm against the firmware interface. */
+       iwe.u.qual.level = 256 - ap->rssi;
+       iwe.u.qual.qual = ap->sq;
+       iwe.u.qual.noise = 0;   /* invalid noise value */
+       current_ev =
+           iwe_stream_add_event(info, current_ev, end_buf, &iwe,
+                                IW_EV_QUAL_LEN);
+
+       /* Add encryption capability (flags only: data.length is 0, so
+        * the ssid buffer passed here contributes no payload bytes) */
+       iwe.cmd = SIOCGIWENCODE;
+       if (capabilities & BSS_CAP_PRIVACY)
+               iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+       else
+               iwe.u.data.flags = IW_ENCODE_DISABLED;
+       iwe.u.data.length = 0;
+       current_ev =
+           iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+                                &(ap->ssid.body[0]));
+
+       /* Rate : stuffing multiple values in a single event require a bit
+        * more of magic - Jean II */
+       current_val = current_ev + IW_EV_LCP_LEN;
+
+       iwe.cmd = SIOCGIWRATE;
+       /* Those two flags are ignored... */
+       iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+
+       /* Max 16 values */
+       for (i = 0; i < 16; i++) {
+               /* NULL terminated */
+               if (i >= ap->rate_set.size)
+                       break;
+               /* Bit rate given in 500 kb/s units (+ 0x80) */
+               iwe.u.bitrate.value = ((ap->rate_set.body[i] & 0x7f) * 500000);
+               /* Add new value to event */
+               current_val =
+                   iwe_stream_add_value(info, current_ev, current_val, end_buf,
+                                        &iwe, IW_EV_PARAM_LEN);
+       }
+       /* Check if we added any event */
+       if ((current_val - current_ev) > IW_EV_LCP_LEN)
+               current_ev = current_val;
+
+#define GENERIC_INFO_ELEM_ID 0xdd
+#define RSN_INFO_ELEM_ID 0x30
+       /* Hex-dump the RSN IE as "rsn_ie=<id><size><body...>" (two hex
+        * digits per byte) into an IWEVCUSTOM event. */
+       if (ap->rsn_ie.id == RSN_INFO_ELEM_ID && ap->rsn_ie.size != 0) {
+               pbuf = &buf0[0];
+               memset(&iwe, 0, sizeof(iwe));
+               iwe.cmd = IWEVCUSTOM;
+               memcpy(buf0, rsn_leader, sizeof(rsn_leader) - 1);
+               iwe.u.data.length += sizeof(rsn_leader) - 1;
+               pbuf += sizeof(rsn_leader) - 1;
+
+               pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.id);
+               pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.size);
+               iwe.u.data.length += 4;
+
+               for (i = 0; i < ap->rsn_ie.size; i++)
+                       pbuf += sprintf(pbuf, "%02x", ap->rsn_ie.body[i]);
+               iwe.u.data.length += (ap->rsn_ie.size) * 2;
+
+               DPRINTK(4, "ap->rsn.size=%d\n", ap->rsn_ie.size);
+
+               current_ev =
+                   iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+                                        &buf0[0]);
+       }
+       /* Same hex-dump format for the (vendor-specific) WPA IE. */
+       if (ap->wpa_ie.id == GENERIC_INFO_ELEM_ID && ap->wpa_ie.size != 0) {
+               pbuf = &buf1[0];
+               memset(&iwe, 0, sizeof(iwe));
+               iwe.cmd = IWEVCUSTOM;
+               memcpy(buf1, wpa_leader, sizeof(wpa_leader) - 1);
+               iwe.u.data.length += sizeof(wpa_leader) - 1;
+               pbuf += sizeof(wpa_leader) - 1;
+
+               pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.id);
+               pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.size);
+               iwe.u.data.length += 4;
+
+               for (i = 0; i < ap->wpa_ie.size; i++)
+                       pbuf += sprintf(pbuf, "%02x", ap->wpa_ie.body[i]);
+               iwe.u.data.length += (ap->wpa_ie.size) * 2;
+
+               /* NOTE(review): label says "rsn" but this logs the WPA IE
+                * size — looks like a copy/paste of the branch above. */
+               DPRINTK(4, "ap->rsn.size=%d\n", ap->wpa_ie.size);
+               DPRINTK(4, "iwe.u.data.length=%d\n", iwe.u.data.length);
+
+               current_ev =
+                   iwe_stream_add_point(info, current_ev, end_buf, &iwe,
+                                        &buf1[0]);
+       }
+
+       /* The other data in the scan result are not really
+        * interesting, so for now drop it - Jean II */
+       return current_ev;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : Read Scan Results */
+/* SIOCGIWSCAN handler: serialize the cached AP list into the caller's
+ * event buffer.  Returns -EAGAIN while a scan is still in flight,
+ * -ENODATA when no results are cached, and -E2BIG (with length = 0)
+ * when the caller's buffer is too small, per WE convention. */
+static int ks_wlan_get_scan(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_point *dwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int i;
+       char *current_ev = extra;
+       DPRINTK(2, "\n");
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       /* Scan still pending in the SME: ask the caller to retry. */
+       if (priv->sme_i.sme_flag & SME_AP_SCAN) {
+               DPRINTK(2, "flag AP_SCAN\n");
+               return -EAGAIN;
+       }
+
+       if (priv->aplist.size == 0) {
+               /* Client error, no scan results...
+                * The caller need to restart the scan. */
+               DPRINTK(2, "aplist 0\n");
+               return -ENODATA;
+       }
+#if 0
+       /* NOTE(review): dead code — this call predates the 'info'
+        * parameter of ks_wlan_translate_scan() and would not compile
+        * as-is if re-enabled. */
+       /* current connect ap */
+       if ((priv->connect_status & CONNECT_STATUS_MASK) == CONNECT_STATUS) {
+               if ((extra + dwrq->length) - current_ev <= IW_EV_ADDR_LEN) {
+                       dwrq->length = 0;
+                       return -E2BIG;
+               }
+               current_ev = ks_wlan_translate_scan(dev, current_ev,
+//                                                  extra + IW_SCAN_MAX_DATA,
+                                                   extra + dwrq->length,
+                                                   &(priv->current_ap));
+       }
+#endif
+       /* Read and parse all entries */
+       for (i = 0; i < priv->aplist.size; i++) {
+               /* Bail out if even a minimal event no longer fits. */
+               if ((extra + dwrq->length) - current_ev <= IW_EV_ADDR_LEN) {
+                       dwrq->length = 0;
+                       return -E2BIG;
+               }
+               /* Translate to WE format this entry */
+               current_ev = ks_wlan_translate_scan(dev, info, current_ev,
+//                                                  extra + IW_SCAN_MAX_DATA,
+                                                   extra + dwrq->length,
+                                                   &(priv->aplist.ap[i]));
+       }
+       /* Length of data */
+       dwrq->length = (current_ev - extra);
+       dwrq->flags = 0;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Commit handler : called after a bunch of SET operations */
+/* SIOCSIWCOMMIT handler: flush any parameter changes accumulated by
+ * earlier SET handlers down to the firmware in one shot. */
+static int ks_wlan_config_commit(struct net_device *dev,
+                                struct iw_request_info *info, void *zwrq,
+                                char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->need_commit) {
+               /* Push the pending changes and clear the dirty mask. */
+               ks_wlan_setup_parameter(priv, priv->need_commit);
+               priv->need_commit = 0;
+       }
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless handler : set association ie params */
+/* SIOCSIWGENIE handler: accept the generic IE from wpa_supplicant.
+ * The IE itself is not stored — the call succeeds as a no-op (only a
+ * sleep-mode check is performed). */
+static int ks_wlan_set_genie(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       DPRINTK(2, "\n");
+
+       /* for SLEEP MODE */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless handler : set authentication mode params */
+/* SIOCSIWAUTH handler: record WPA/authentication parameters from
+ * wpa_supplicant into priv->wpa / priv->reg.  Changes are accumulated
+ * in priv->need_commit and pushed to the firmware at the end of this
+ * call rather than waiting for an explicit commit. */
+static int ks_wlan_set_auth_mode(struct net_device *dev,
+                                struct iw_request_info *info,
+                                struct iw_param *vwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int index = (vwrq->flags & IW_AUTH_INDEX);
+       int value = vwrq->value;
+
+       DPRINTK(2, "index=%d:value=%08X\n", index, value);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       switch (index) {
+       case IW_AUTH_WPA_VERSION:       /* 0 */
+               /* rsn_enabled tracks whether any WPA/WPA2 version is
+                * active; disabling WPA clears it. */
+               switch (value) {
+               case IW_AUTH_WPA_VERSION_DISABLED:
+                       priv->wpa.version = value;
+                       if (priv->wpa.rsn_enabled) {
+                               priv->wpa.rsn_enabled = 0;
+                       }
+                       priv->need_commit |= SME_RSN;
+                       break;
+               case IW_AUTH_WPA_VERSION_WPA:
+               case IW_AUTH_WPA_VERSION_WPA2:
+                       priv->wpa.version = value;
+                       if (!(priv->wpa.rsn_enabled)) {
+                               priv->wpa.rsn_enabled = 1;
+                       }
+                       priv->need_commit |= SME_RSN;
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+       case IW_AUTH_CIPHER_PAIRWISE:   /* 1 */
+               /* privacy_invoked mirrors "any cipher in use"; toggling
+                * it schedules a WEP-flag update in the firmware. */
+               switch (value) {
+               case IW_AUTH_CIPHER_NONE:
+                       if (priv->reg.privacy_invoked) {
+                               priv->reg.privacy_invoked = 0x00;
+                               priv->need_commit |= SME_WEP_FLAG;
+                       }
+                       break;
+               case IW_AUTH_CIPHER_WEP40:
+               case IW_AUTH_CIPHER_TKIP:
+               case IW_AUTH_CIPHER_CCMP:
+               case IW_AUTH_CIPHER_WEP104:
+                       if (!priv->reg.privacy_invoked) {
+                               priv->reg.privacy_invoked = 0x01;
+                               priv->need_commit |= SME_WEP_FLAG;
+                       }
+                       priv->wpa.pairwise_suite = value;
+                       priv->need_commit |= SME_RSN_UNICAST;
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+       case IW_AUTH_CIPHER_GROUP:      /* 2 */
+               /* Same pattern as the pairwise case, for the group suite. */
+               switch (value) {
+               case IW_AUTH_CIPHER_NONE:
+                       if (priv->reg.privacy_invoked) {
+                               priv->reg.privacy_invoked = 0x00;
+                               priv->need_commit |= SME_WEP_FLAG;
+                       }
+                       break;
+               case IW_AUTH_CIPHER_WEP40:
+               case IW_AUTH_CIPHER_TKIP:
+               case IW_AUTH_CIPHER_CCMP:
+               case IW_AUTH_CIPHER_WEP104:
+                       if (!priv->reg.privacy_invoked) {
+                               priv->reg.privacy_invoked = 0x01;
+                               priv->need_commit |= SME_WEP_FLAG;
+                       }
+                       priv->wpa.group_suite = value;
+                       priv->need_commit |= SME_RSN_MULTICAST;
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+       case IW_AUTH_KEY_MGMT:  /* 3 */
+               switch (value) {
+               case IW_AUTH_KEY_MGMT_802_1X:
+               case IW_AUTH_KEY_MGMT_PSK:
+               case 0: /* NONE or 802_1X_NO_WPA */
+               case 4: /* WPA_NONE */
+                       priv->wpa.key_mgmt_suite = value;
+                       priv->need_commit |= SME_RSN_AUTH;
+                       break;
+               default:
+                       return -EOPNOTSUPP;
+               }
+               break;
+       case IW_AUTH_80211_AUTH_ALG:    /* 6 */
+               switch (value) {
+               case IW_AUTH_ALG_OPEN_SYSTEM:
+                       priv->wpa.auth_alg = value;
+                       priv->reg.authenticate_type = AUTH_TYPE_OPEN_SYSTEM;
+                       break;
+               case IW_AUTH_ALG_SHARED_KEY:
+                       priv->wpa.auth_alg = value;
+                       priv->reg.authenticate_type = AUTH_TYPE_SHARED_KEY;
+                       break;
+               case IW_AUTH_ALG_LEAP:
+               default:
+                       return -EOPNOTSUPP;
+               }
+               priv->need_commit |= SME_MODE_SET;
+               break;
+       case IW_AUTH_WPA_ENABLED:       /* 7 */
+               /* Recorded only; no commit flag is set for this field. */
+               priv->wpa.wpa_enabled = value;
+               break;
+       case IW_AUTH_PRIVACY_INVOKED:   /* 10 */
+               if ((value && !priv->reg.privacy_invoked) ||
+                   (!value && priv->reg.privacy_invoked)) {
+                       priv->reg.privacy_invoked = value ? 0x01 : 0x00;
+                       priv->need_commit |= SME_WEP_FLAG;
+               }
+               break;
+       case IW_AUTH_RX_UNENCRYPTED_EAPOL:      /* 4 */
+       case IW_AUTH_TKIP_COUNTERMEASURES:      /* 5 */
+       case IW_AUTH_DROP_UNENCRYPTED:  /* 8 */
+       case IW_AUTH_ROAMING_CONTROL:   /* 9 */
+       default:
+               /* Silently accepted but not acted upon. */
+               break;
+       }
+
+       /* return -EINPROGRESS; */
+       /* Commit immediately instead of deferring to SIOCSIWCOMMIT. */
+       if (priv->need_commit) {
+               ks_wlan_setup_parameter(priv, priv->need_commit);
+               priv->need_commit = 0;
+       }
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless handler : get authentication mode params */
+/* SIOCGIWAUTH handler: report back the cached WPA parameters that the
+ * SET handler recorded in priv->wpa. */
+static int ks_wlan_get_auth_mode(struct net_device *dev,
+                                struct iw_request_info *info,
+                                struct iw_param *vwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       int index = (vwrq->flags & IW_AUTH_INDEX);
+       DPRINTK(2, "index=%d\n", index);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+
+       /* for SLEEP MODE */
+       /*  WPA (not used ?? wpa_supplicant) */
+       switch (index) {
+       case IW_AUTH_WPA_VERSION:
+               vwrq->value = priv->wpa.version;
+               break;
+       case IW_AUTH_CIPHER_PAIRWISE:
+               vwrq->value = priv->wpa.pairwise_suite;
+               break;
+       case IW_AUTH_CIPHER_GROUP:
+               vwrq->value = priv->wpa.group_suite;
+               break;
+       case IW_AUTH_KEY_MGMT:
+               vwrq->value = priv->wpa.key_mgmt_suite;
+               break;
+       case IW_AUTH_80211_AUTH_ALG:
+               vwrq->value = priv->wpa.auth_alg;
+               break;
+       case IW_AUTH_WPA_ENABLED:
+               /* NOTE(review): returns rsn_enabled, while the SET handler
+                * stores IW_AUTH_WPA_ENABLED into wpa.wpa_enabled — the two
+                * fields differ; confirm the asymmetry is intentional. */
+               vwrq->value = priv->wpa.rsn_enabled;
+               break;
+       case IW_AUTH_RX_UNENCRYPTED_EAPOL:      /* OK??? */
+       case IW_AUTH_TKIP_COUNTERMEASURES:
+       case IW_AUTH_DROP_UNENCRYPTED:
+       default:
+               /* return -EOPNOTSUPP; */
+               /* Unsupported queries succeed without touching vwrq->value. */
+               break;
+       }
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set encoding token & mode (WPA)*/
+/* SIOCSIWENCODEEXT handler: install a WEP/TKIP/CCMP key into the key
+ * slot selected by IW_ENCODE_INDEX (1..4, stored internally as 0..3),
+ * then enqueue the corresponding SME requests for any state that
+ * changed (TX key index, key material, WEP on/off flag). */
+static int ks_wlan_set_encode_ext(struct net_device *dev,
+                                 struct iw_request_info *info,
+                                 struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct iw_encode_ext *enc;
+       int index = dwrq->flags & IW_ENCODE_INDEX;
+       unsigned int commit = 0;
+
+       enc = (struct iw_encode_ext *)extra;
+
+       /* NOTE(review): enc is dereferenced here, before the if (enc)
+        * check further down — that check is ineffective as ordered;
+        * verify extra can never be NULL for this ioctl. */
+       DPRINTK(2, "flags=%04X:: ext_flags=%08X\n", dwrq->flags,
+               enc->ext_flags);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       /* Convert the 1-based WE key index to the 0-based slot number. */
+       if (index < 1 || index > 4)
+               return -EINVAL;
+       else
+               index--;
+
+       if (dwrq->flags & IW_ENCODE_DISABLED) {
+               priv->wpa.key[index].key_len = 0;
+       }
+
+       if (enc) {
+               priv->wpa.key[index].ext_flags = enc->ext_flags;
+               if (enc->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+                       priv->wpa.txkey = index;
+                       commit |= SME_WEP_INDEX;
+               } else if (enc->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
+                       memcpy(&priv->wpa.key[index].rx_seq[0],
+                              enc->rx_seq, IW_ENCODE_SEQ_MAX_SIZE);
+               }
+
+               memcpy(&priv->wpa.key[index].addr.sa_data[0],
+                      &enc->addr.sa_data[0], ETH_ALEN);
+
+               switch (enc->alg) {
+               case IW_ENCODE_ALG_NONE:
+                       /* Clearing the key also drops the privacy flag. */
+                       if (priv->reg.privacy_invoked) {
+                               priv->reg.privacy_invoked = 0x00;
+                               commit |= SME_WEP_FLAG;
+                       }
+                       priv->wpa.key[index].key_len = 0;
+
+                       break;
+               case IW_ENCODE_ALG_WEP:
+               case IW_ENCODE_ALG_CCMP:
+                       if (!priv->reg.privacy_invoked) {
+                               priv->reg.privacy_invoked = 0x01;
+                               commit |= SME_WEP_FLAG;
+                       }
+                       if (enc->key_len) {
+                               memcpy(&priv->wpa.key[index].key_val[0],
+                                      &enc->key[0], enc->key_len);
+                               priv->wpa.key[index].key_len = enc->key_len;
+                               commit |= (SME_WEP_VAL1 << index);
+                       }
+                       break;
+               case IW_ENCODE_ALG_TKIP:
+                       if (!priv->reg.privacy_invoked) {
+                               priv->reg.privacy_invoked = 0x01;
+                               commit |= SME_WEP_FLAG;
+                       }
+                       /* 32-byte TKIP material: bytes 0-15 are the
+                        * temporal key, 16-23 the TX MIC key and 24-31
+                        * the RX MIC key.  For WPA_NONE (ad-hoc) both
+                        * MIC keys come from bytes 16-23. */
+                       if (enc->key_len == 32) {
+                               memcpy(&priv->wpa.key[index].key_val[0],
+                                      &enc->key[0], enc->key_len - 16);
+                               priv->wpa.key[index].key_len =
+                                   enc->key_len - 16;
+                               if (priv->wpa.key_mgmt_suite == 4) {    /* WPA_NONE */
+                                       memcpy(&priv->wpa.key[index].
+                                              tx_mic_key[0], &enc->key[16], 8);
+                                       memcpy(&priv->wpa.key[index].
+                                              rx_mic_key[0], &enc->key[16], 8);
+                               } else {
+                                       memcpy(&priv->wpa.key[index].
+                                              tx_mic_key[0], &enc->key[16], 8);
+                                       memcpy(&priv->wpa.key[index].
+                                              rx_mic_key[0], &enc->key[24], 8);
+                               }
+                               commit |= (SME_WEP_VAL1 << index);
+                       }
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               priv->wpa.key[index].alg = enc->alg;
+       } else
+               return -EINVAL;
+
+       /* Queue SME work for whatever actually changed. */
+       if (commit) {
+               if (commit & SME_WEP_INDEX)
+                       hostif_sme_enqueue(priv, SME_SET_TXKEY);
+               if (commit & SME_WEP_VAL_MASK)
+                       hostif_sme_enqueue(priv, SME_SET_KEY1 + index);
+               if (commit & SME_WEP_FLAG)
+                       hostif_sme_enqueue(priv, SME_WEP_FLAG_REQUEST);
+       }
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : get encoding token & mode (WPA)*/
+/* SIOCGIWENCODEEXT handler: reading keys back is intentionally not
+ * supported (WPA keys are write-only through this interface), so the
+ * call succeeds without returning any data — only the sleep-mode
+ * guard is applied. */
+static int ks_wlan_get_encode_ext(struct net_device *dev,
+                                 struct iw_request_info *info,
+                                 struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       /* for SLEEP MODE */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : PMKSA cache operation (WPA2) */
+/* SIOCSIWPMKSA handler: maintain the WPA2 PMKSA cache.  Entries live
+ * in the fixed priv->pmklist.pmk[] array and are threaded onto
+ * priv->pmklist.head in most-recently-used order (list_move puts the
+ * hit/updated entry at the front).  After any change, SME_SET_PMKSA
+ * is enqueued so the firmware cache is resynchronized. */
+static int ks_wlan_set_pmksa(struct net_device *dev,
+                            struct iw_request_info *info,
+                            struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct iw_pmksa *pmksa;
+       int i;
+       struct pmk_t *pmk;
+       struct list_head *ptr;
+
+       DPRINTK(2, "\n");
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (!extra) {
+               return -EINVAL;
+       }
+       pmksa = (struct iw_pmksa *)extra;
+       DPRINTK(2, "cmd=%d\n", pmksa->cmd);
+
+       switch (pmksa->cmd) {
+       case IW_PMKSA_ADD:
+               if (list_empty(&priv->pmklist.head)) {  /* new list */
+                       /* Find a free slot (all-zero BSSID marks unused). */
+                       for (i = 0; i < PMK_LIST_MAX; i++) {
+                               pmk = &priv->pmklist.pmk[i];
+                               if (!memcmp
+                                   ("\x00\x00\x00\x00\x00\x00", pmk->bssid,
+                                    ETH_ALEN))
+                                       break;
+                       }
+                       memcpy(pmk->bssid, pmksa->bssid.sa_data, ETH_ALEN);
+                       memcpy(pmk->pmkid, pmksa->pmkid, IW_PMKID_LEN);
+                       list_add(&pmk->list, &priv->pmklist.head);
+                       priv->pmklist.size++;
+               } else {        /* search cache data */
+                       list_for_each(ptr, &priv->pmklist.head) {
+                               pmk = list_entry(ptr, struct pmk_t, list);
+                               if (!memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN)) {      /* match address! list move to head. */
+                                       memcpy(pmk->pmkid, pmksa->pmkid,
+                                              IW_PMKID_LEN);
+                                       list_move(&pmk->list,
+                                                 &priv->pmklist.head);
+                                       break;
+                               }
+                       }
+                       if (ptr == &priv->pmklist.head) {       /* not find address. */
+                               if (PMK_LIST_MAX > priv->pmklist.size) {        /* new cache data */
+                                       /* NOTE(review): if no zeroed slot is
+                                        * found this loop falls off with pmk
+                                        * at the last slot; size < MAX should
+                                        * imply a free slot exists — verify. */
+                                       for (i = 0; i < PMK_LIST_MAX; i++) {
+                                               pmk = &priv->pmklist.pmk[i];
+                                               if (!memcmp
+                                                   ("\x00\x00\x00\x00\x00\x00",
+                                                    pmk->bssid, ETH_ALEN))
+                                                       break;
+                                       }
+                                       memcpy(pmk->bssid, pmksa->bssid.sa_data,
+                                              ETH_ALEN);
+                                       memcpy(pmk->pmkid, pmksa->pmkid,
+                                              IW_PMKID_LEN);
+                                       list_add(&pmk->list,
+                                                &priv->pmklist.head);
+                                       priv->pmklist.size++;
+                               } else {        /* overwrite old cache data */
+                                       /* Cache full: recycle the least
+                                        * recently used entry (list tail). */
+                                       pmk =
+                                           list_entry(priv->pmklist.head.prev,
+                                                      struct pmk_t, list);
+                                       memcpy(pmk->bssid, pmksa->bssid.sa_data,
+                                              ETH_ALEN);
+                                       memcpy(pmk->pmkid, pmksa->pmkid,
+                                              IW_PMKID_LEN);
+                                       list_move(&pmk->list,
+                                                 &priv->pmklist.head);
+                               }
+                       }
+               }
+               break;
+       case IW_PMKSA_REMOVE:
+               if (list_empty(&priv->pmklist.head)) {  /* list empty */
+                       return -EINVAL;
+               } else {        /* search cache data */
+                       list_for_each(ptr, &priv->pmklist.head) {
+                               pmk = list_entry(ptr, struct pmk_t, list);
+                               if (!memcmp(pmksa->bssid.sa_data, pmk->bssid, ETH_ALEN)) {      /* match address! list del. */
+                                       memset(pmk->bssid, 0, ETH_ALEN);
+                                       memset(pmk->pmkid, 0, IW_PMKID_LEN);
+                                       list_del_init(&pmk->list);
+                                       break;
+                               }
+                       }
+                       if (ptr == &priv->pmklist.head) {       /* not find address. */
+                               /* Removing a non-cached BSSID is not an error. */
+                               return 0;
+                       }
+               }
+               break;
+       case IW_PMKSA_FLUSH:
+               /* Wipe the whole cache and re-initialize the list heads. */
+               memset(&(priv->pmklist), 0, sizeof(priv->pmklist));
+               INIT_LIST_HEAD(&priv->pmklist.head);
+               for (i = 0; i < PMK_LIST_MAX; i++)
+                       INIT_LIST_HEAD(&priv->pmklist.pmk[i].list);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       hostif_sme_enqueue(priv, SME_SET_PMKSA);
+       return 0;
+}
+
+/* Return the cached wireless statistics for SIOCGIWSTATS.  When no
+ * phyinfo update is in progress the cached values are returned as-is
+ * (or NULL if the device has not finished initializing); otherwise the
+ * discard/miss counters are zeroed before handing the stats back. */
+static struct iw_statistics *ks_get_wireless_stats(struct net_device *dev)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct iw_statistics *wstats = &priv->wstats;
+
+       if (!atomic_read(&update_phyinfo))
+               return (priv->dev_state < DEVICE_STATE_READY) ? NULL : wstats;
+
+       /* Packets discarded in the wireless adapter due to wireless
+        * specific problems */
+       wstats->discard.nwid = 0;       /* Rx invalid nwid      */
+       wstats->discard.code = 0;       /* Rx invalid crypt     */
+       wstats->discard.fragment = 0;   /* Rx invalid frag      */
+       wstats->discard.retries = 0;    /* Tx excessive retries */
+       wstats->discard.misc = 0;       /* Invalid misc         */
+       wstats->miss.beacon = 0;        /* Missed beacon        */
+
+       return wstats;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set stop request */
+/* Private handler: request a firmware stop via the SME queue.  The
+ * argument must be non-zero; a zero value is rejected with -EINVAL. */
+static int ks_wlan_set_stop_request(struct net_device *dev,
+                                   struct iw_request_info *info, __u32 * uwrq,
+                                   char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       DPRINTK(2, "\n");
+
+       /* for SLEEP MODE */
+       if (priv->sleep_mode == SLP_SLEEP)
+               return -EPERM;
+
+       if (*uwrq == 0)
+               return -EINVAL;
+
+       hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Wireless Handler : set MLME */
+#include <linux/ieee80211.h>
+/* SIOCSIWMLME handler: translate MLME deauth/disassoc requests from
+ * wpa_supplicant into a firmware stop request. */
+static int ks_wlan_set_mlme(struct net_device *dev,
+                           struct iw_request_info *info, struct iw_point *dwrq,
+                           char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       struct iw_mlme *mlme = (struct iw_mlme *)extra;
+       __u32 mode;
+
+       DPRINTK(2, ":%d :%d\n", mlme->cmd, mlme->reason_code);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       switch (mlme->cmd) {
+       case IW_MLME_DEAUTH:
+               /* MIC-failure deauth is ignored here (handled elsewhere
+                * by the TKIP countermeasure path). */
+               if (mlme->reason_code == WLAN_REASON_MIC_FAILURE) {
+                       return 0;
+               }
+               /* fall through - other deauths are treated like disassoc */
+       case IW_MLME_DISASSOC:
+               mode = 1;
+               return ks_wlan_set_stop_request(dev, NULL, &mode, NULL);
+       default:
+               return -EOPNOTSUPP;     /* Not Support */
+       }
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get firemware version */
+/* Private handler: copy the firmware version string cached at device
+ * initialization out to the caller; length includes the NUL byte. */
+static int ks_wlan_get_firmware_version(struct net_device *dev,
+                                       struct iw_request_info *info,
+                                       struct iw_point *dwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       strcpy(extra, priv->firmware_version);
+       dwrq->length = priv->version_size + 1;
+       return 0;
+}
+
+#if 0
+/* NOTE(review): the three handlers below are compiled out and kept for
+ * reference only.  They still use the legacy dev->priv accessor rather
+ * than netdev_priv(), so they would need updating before re-enabling. */
+/*------------------------------------------------------------------*/
+/* Private handler : set force disconnect status */
+static int ks_wlan_set_detach(struct net_device *dev,
+                             struct iw_request_info *info, __u32 * uwrq,
+                             char *extra)
+{
+       struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (*uwrq == CONNECT_STATUS) {  /* 0 */
+               priv->connect_status &= ~FORCE_DISCONNECT;
+               if ((priv->connect_status & CONNECT_STATUS_MASK) ==
+                   CONNECT_STATUS)
+                       netif_carrier_on(dev);
+       } else if (*uwrq == DISCONNECT_STATUS) {        /* 1 */
+               priv->connect_status |= FORCE_DISCONNECT;
+               netif_carrier_off(dev);
+       } else
+               return -EINVAL;
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get force disconnect status */
+static int ks_wlan_get_detach(struct net_device *dev,
+                             struct iw_request_info *info, __u32 * uwrq,
+                             char *extra)
+{
+       struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = ((priv->connect_status & FORCE_DISCONNECT) ? 1 : 0);
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get connect status */
+static int ks_wlan_get_connect(struct net_device *dev,
+                              struct iw_request_info *info, __u32 * uwrq,
+                              char *extra)
+{
+       struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = (priv->connect_status & CONNECT_STATUS_MASK);
+       return 0;
+}
+#endif
+
+/*------------------------------------------------------------------*/
+/* Private handler : set preamble */
+static int ks_wlan_set_preamble(struct net_device *dev,
+                               struct iw_request_info *info, __u32 * uwrq,
+                               char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (*uwrq == LONG_PREAMBLE) {   /* 0 */
+               priv->reg.preamble = LONG_PREAMBLE;
+       } else if (*uwrq == SHORT_PREAMBLE) {   /* 1 */
+               priv->reg.preamble = SHORT_PREAMBLE;
+       } else
+               return -EINVAL;
+
+       priv->need_commit |= SME_MODE_SET;
+       return -EINPROGRESS;    /* Call commit handler */
+
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get preamble */
+static int ks_wlan_get_preamble(struct net_device *dev,
+                               struct iw_request_info *info, __u32 * uwrq,
+                               char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = priv->reg.preamble;
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set power save mode */
+static int ks_wlan_set_powermgt(struct net_device *dev,
+                               struct iw_request_info *info, __u32 * uwrq,
+                               char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (*uwrq == POWMGT_ACTIVE_MODE) {      /* 0 */
+               priv->reg.powermgt = POWMGT_ACTIVE_MODE;
+       } else if (*uwrq == POWMGT_SAVE1_MODE) {        /* 1 */
+               if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
+                       priv->reg.powermgt = POWMGT_SAVE1_MODE;
+               else
+                       return -EINVAL;
+       } else if (*uwrq == POWMGT_SAVE2_MODE) {        /* 2 */
+               if (priv->reg.operation_mode == MODE_INFRASTRUCTURE)
+                       priv->reg.powermgt = POWMGT_SAVE2_MODE;
+               else
+                       return -EINVAL;
+       } else
+               return -EINVAL;
+
+       hostif_sme_enqueue(priv, SME_POW_MNGMT_REQUEST);
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get power save made */
+static int ks_wlan_get_powermgt(struct net_device *dev,
+                               struct iw_request_info *info, __u32 * uwrq,
+                               char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = priv->reg.powermgt;
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set scan type */
+static int ks_wlan_set_scan_type(struct net_device *dev,
+                                struct iw_request_info *info, __u32 * uwrq,
+                                char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (*uwrq == ACTIVE_SCAN) {     /* 0 */
+               priv->reg.scan_type = ACTIVE_SCAN;
+       } else if (*uwrq == PASSIVE_SCAN) {     /* 1 */
+               priv->reg.scan_type = PASSIVE_SCAN;
+       } else
+               return -EINVAL;
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get scan type */
+static int ks_wlan_get_scan_type(struct net_device *dev,
+                                struct iw_request_info *info, __u32 * uwrq,
+                                char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = priv->reg.scan_type;
+       return 0;
+}
+
#if 0
/*------------------------------------------------------------------*/
/* Private handler : write raw data to device */
/* NOTE(review): dead code under #if 0; uses legacy dev->priv. If ever
 * re-enabled: allocation failure should return -ENOMEM, not -EFAULT,
 * and the ownership of wbuff is unclear — presumably ks_wlan_hw_tx
 * frees it, TODO confirm (otherwise this leaks). */
static int ks_wlan_data_write(struct net_device *dev,
			      struct iw_request_info *info,
			      struct iw_point *dwrq, char *extra)
{
	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
	unsigned char *wbuff = NULL;

	if (priv->sleep_mode == SLP_SLEEP) {
		return -EPERM;
	}
	/* for SLEEP MODE */
	wbuff = (unsigned char *)kmalloc(dwrq->length, GFP_ATOMIC);
	if (!wbuff)
		return -EFAULT;
	memcpy(wbuff, extra, dwrq->length);

	/* write to device */
	ks_wlan_hw_tx(priv, wbuff, dwrq->length, NULL, NULL, NULL);

	return 0;
}

/*------------------------------------------------------------------*/
/* Private handler : read raw data form device */
/* NOTE(review): dead code under #if 0. The event_count test and the
 * later atomic_dec are not one atomic operation — looks racy against
 * concurrent readers; verify before re-enabling. */
static int ks_wlan_data_read(struct net_device *dev,
			     struct iw_request_info *info,
			     struct iw_point *dwrq, char *extra)
{
	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
	unsigned short read_length;

	if (priv->sleep_mode == SLP_SLEEP) {
		return -EPERM;
	}
	/* for SLEEP MODE */
	if (!atomic_read(&priv->event_count)) {
		if (priv->dev_state < DEVICE_STATE_BOOT) {	/* Remove device */
			read_length = 4;
			memset(extra, 0xff, read_length);
			dwrq->length = read_length;
			return 0;
		}
		read_length = 0;
		memset(extra, 0, 1);
		dwrq->length = 0;
		return 0;
	}

	if (atomic_read(&priv->event_count) > 0)
		atomic_dec(&priv->event_count);

	spin_lock(&priv->dev_read_lock);	/* request spin lock */

	/* Copy length max size 0x07ff */
	if (priv->dev_size[priv->dev_count] > 2047)
		read_length = 2047;
	else
		read_length = priv->dev_size[priv->dev_count];

	/* Copy data */
	memcpy(extra, &(priv->dev_data[priv->dev_count][0]), read_length);

	spin_unlock(&priv->dev_read_lock);	/* release spin lock */

	/* Initialize */
	priv->dev_data[priv->dev_count] = 0;
	priv->dev_size[priv->dev_count] = 0;

	/* advance to the next slot in the circular buffer */
	priv->dev_count++;
	if (priv->dev_count == DEVICE_STOCK_COUNT)
		priv->dev_count = 0;

	/* Set read size */
	dwrq->length = read_length;

	return 0;
}
#endif
+
#if 0
/*------------------------------------------------------------------*/
/* Private handler : get wep string */
/* NOTE(review): dead code under #if 0; uses legacy dev->priv. */
/* buffer: header line + 4 keys * worst-case line + NUL */
#define WEP_ASCII_BUFF_SIZE (17+64*4+1)
static int ks_wlan_get_wep_ascii(struct net_device *dev,
				 struct iw_request_info *info,
				 struct iw_point *dwrq, char *extra)
{
	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
	int i, j, len = 0;
	char tmp[WEP_ASCII_BUFF_SIZE];

	if (priv->sleep_mode == SLP_SLEEP) {
		return -EPERM;
	}
	/* for SLEEP MODE */
	strcpy(tmp, " WEP keys ASCII \n");
	len += strlen(" WEP keys ASCII \n");

	/* format each of the four WEP key slots as one "\t[n] ..." line */
	for (i = 0; i < 4; i++) {
		strcpy(tmp + len, "\t[");
		len += strlen("\t[");
		tmp[len] = '1' + i;
		len++;
		strcpy(tmp + len, "] ");
		len += strlen("] ");
		if (priv->reg.wep_key[i].size) {
			/* size < 6 bytes => 40-bit key, else 104-bit */
			strcpy(tmp + len,
			       (priv->reg.wep_key[i].size <
				6 ? "(40bits) [" : "(104bits) ["));
			len +=
			    strlen((priv->reg.wep_key[i].size <
				    6 ? "(40bits) [" : "(104bits) ["));
			/* non-printable key bytes are rendered as spaces */
			for (j = 0; j < priv->reg.wep_key[i].size; j++, len++)
				tmp[len] =
				    (isprint(priv->reg.wep_key[i].val[j]) ?
				     priv->reg.wep_key[i].val[j] : ' ');

			strcpy(tmp + len, "]\n");
			len += strlen("]\n");
		} else {
			strcpy(tmp + len, "off\n");
			len += strlen("off\n");
		}
	}

	memcpy(extra, tmp, len);
	dwrq->length = len + 1;
	return 0;
}
#endif
+
+/*------------------------------------------------------------------*/
+/* Private handler : set beacon lost count */
+static int ks_wlan_set_beacon_lost(struct net_device *dev,
+                                  struct iw_request_info *info, __u32 * uwrq,
+                                  char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (*uwrq >= BEACON_LOST_COUNT_MIN && *uwrq <= BEACON_LOST_COUNT_MAX) {
+               priv->reg.beacon_lost_count = *uwrq;
+       } else
+               return -EINVAL;
+
+       if (priv->reg.operation_mode == MODE_INFRASTRUCTURE) {
+               priv->need_commit |= SME_MODE_SET;
+               return -EINPROGRESS;    /* Call commit handler */
+       } else
+               return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get beacon lost count */
+static int ks_wlan_get_beacon_lost(struct net_device *dev,
+                                  struct iw_request_info *info, __u32 * uwrq,
+                                  char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = priv->reg.beacon_lost_count;
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set phy type */
+static int ks_wlan_set_phy_type(struct net_device *dev,
+                               struct iw_request_info *info, __u32 * uwrq,
+                               char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (*uwrq == D_11B_ONLY_MODE) { /* 0 */
+               priv->reg.phy_type = D_11B_ONLY_MODE;
+       } else if (*uwrq == D_11G_ONLY_MODE) {  /* 1 */
+               priv->reg.phy_type = D_11G_ONLY_MODE;
+       } else if (*uwrq == D_11BG_COMPATIBLE_MODE) {   /* 2 */
+               priv->reg.phy_type = D_11BG_COMPATIBLE_MODE;
+       } else
+               return -EINVAL;
+
+       priv->need_commit |= SME_MODE_SET;
+       return -EINPROGRESS;    /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get phy type */
+static int ks_wlan_get_phy_type(struct net_device *dev,
+                               struct iw_request_info *info, __u32 * uwrq,
+                               char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = priv->reg.phy_type;
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set cts mode */
+static int ks_wlan_set_cts_mode(struct net_device *dev,
+                               struct iw_request_info *info, __u32 * uwrq,
+                               char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (*uwrq == CTS_MODE_FALSE) {  /* 0 */
+               priv->reg.cts_mode = CTS_MODE_FALSE;
+       } else if (*uwrq == CTS_MODE_TRUE) {    /* 1 */
+               if (priv->reg.phy_type == D_11G_ONLY_MODE ||
+                   priv->reg.phy_type == D_11BG_COMPATIBLE_MODE)
+                       priv->reg.cts_mode = CTS_MODE_TRUE;
+               else
+                       priv->reg.cts_mode = CTS_MODE_FALSE;
+       } else
+               return -EINVAL;
+
+       priv->need_commit |= SME_MODE_SET;
+       return -EINPROGRESS;    /* Call commit handler */
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get cts mode */
+static int ks_wlan_get_cts_mode(struct net_device *dev,
+                               struct iw_request_info *info, __u32 * uwrq,
+                               char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = priv->reg.cts_mode;
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set sleep mode */
+static int ks_wlan_set_sleep_mode(struct net_device *dev,
+                                 struct iw_request_info *info,
+                                 __u32 * uwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       DPRINTK(2, "\n");
+
+       if (*uwrq == SLP_SLEEP) {
+               priv->sleep_mode = *uwrq;
+               printk("SET_SLEEP_MODE %d\n", priv->sleep_mode);
+
+               hostif_sme_enqueue(priv, SME_STOP_REQUEST);
+               hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
+
+       } else if (*uwrq == SLP_ACTIVE) {
+               priv->sleep_mode = *uwrq;
+               printk("SET_SLEEP_MODE %d\n", priv->sleep_mode);
+               hostif_sme_enqueue(priv, SME_SLEEP_REQUEST);
+       } else {
+               printk("SET_SLEEP_MODE %d errror\n", *uwrq);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get sleep mode */
+static int ks_wlan_get_sleep_mode(struct net_device *dev,
+                                 struct iw_request_info *info,
+                                 __u32 * uwrq, char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       DPRINTK(2, "GET_SLEEP_MODE %d\n", priv->sleep_mode);
+       *uwrq = priv->sleep_mode;
+
+       return 0;
+}
+
#if 0
/*------------------------------------------------------------------*/
/* Private handler : set phy information timer */
/* NOTE(review): dead code under #if 0; uses legacy dev->priv. The
 * "*uwrq >= 0" test below is always true for an unsigned __u32 and
 * would trip a sparse warning if enabled. */
static int ks_wlan_set_phy_information_timer(struct net_device *dev,
					     struct iw_request_info *info,
					     __u32 * uwrq, char *extra)
{
	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;

	if (priv->sleep_mode == SLP_SLEEP) {
		return -EPERM;
	}
	/* for SLEEP MODE */
	if (*uwrq >= 0 && *uwrq <= 0xFFFF)	/* 0-65535 */
		priv->reg.phy_info_timer = (uint16_t) * uwrq;
	else
		return -EINVAL;

	hostif_sme_enqueue(priv, SME_PHY_INFO_REQUEST);

	return 0;
}

/*------------------------------------------------------------------*/
/* Private handler : get phy information timer */
static int ks_wlan_get_phy_information_timer(struct net_device *dev,
					     struct iw_request_info *info,
					     __u32 * uwrq, char *extra)
{
	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;

	if (priv->sleep_mode == SLP_SLEEP) {
		return -EPERM;
	}
	/* for SLEEP MODE */
	*uwrq = priv->reg.phy_info_timer;
	return 0;
}
#endif
+
+#ifdef WPS
+/*------------------------------------------------------------------*/
+/* Private handler : set WPS enable */
+static int ks_wlan_set_wps_enable(struct net_device *dev,
+                                 struct iw_request_info *info, __u32 * uwrq,
+                                 char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       DPRINTK(2, "\n");
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (*uwrq == 0 || *uwrq == 1)
+               priv->wps.wps_enabled = *uwrq;
+       else
+               return -EINVAL;
+
+       hostif_sme_enqueue(priv, SME_WPS_ENABLE_REQUEST);
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get WPS enable */
+static int ks_wlan_get_wps_enable(struct net_device *dev,
+                                 struct iw_request_info *info, __u32 * uwrq,
+                                 char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+       DPRINTK(2, "\n");
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = priv->wps.wps_enabled;
+       printk("return=%d\n", *uwrq);
+
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set WPS probe req */
+static int ks_wlan_set_wps_probe_req(struct net_device *dev,
+                                    struct iw_request_info *info,
+                                    struct iw_point *dwrq, char *extra)
+{
+       uint8_t *p = extra;
+       unsigned char len;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       DPRINTK(2, "\n");
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       DPRINTK(2, "dwrq->length=%d\n", dwrq->length);
+
+       /* length check */
+       if (p[1] + 2 != dwrq->length || dwrq->length > 256) {
+               return -EINVAL;
+       }
+
+       priv->wps.ielen = p[1] + 2 + 1; /* IE header + IE + sizeof(len) */
+       len = p[1] + 2; /* IE header + IE */
+
+       memcpy(priv->wps.ie, &len, sizeof(len));
+       p = memcpy(priv->wps.ie + 1, p, len);
+
+       DPRINTK(2, "%d(%#x): %02X %02X %02X %02X ... %02X %02X %02X\n",
+               priv->wps.ielen, priv->wps.ielen, p[0], p[1], p[2], p[3],
+               p[priv->wps.ielen - 3], p[priv->wps.ielen - 2],
+               p[priv->wps.ielen - 1]);
+
+       hostif_sme_enqueue(priv, SME_WPS_PROBE_REQUEST);
+
+       return 0;
+}
+
#if 0
/*------------------------------------------------------------------*/
/* Private handler : get WPS probe req */
/* NOTE(review): dead stub under #if 0 — performs the sleep-mode gate
 * but never fills *uwrq; uses legacy dev->priv. */
static int ks_wlan_get_wps_probe_req(struct net_device *dev,
				     struct iw_request_info *info,
				     __u32 * uwrq, char *extra)
{
	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;
	DPRINTK(2, "\n");

	if (priv->sleep_mode == SLP_SLEEP) {
		return -EPERM;
	}
	/* for SLEEP MODE */
	return 0;
}
#endif
+#endif /* WPS */
+
+/*------------------------------------------------------------------*/
+/* Private handler : set tx gain control value */
+static int ks_wlan_set_tx_gain(struct net_device *dev,
+                              struct iw_request_info *info, __u32 * uwrq,
+                              char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (*uwrq >= 0 && *uwrq <= 0xFF)        /* 0-255 */
+               priv->gain.TxGain = (uint8_t) * uwrq;
+       else
+               return -EINVAL;
+
+       if (priv->gain.TxGain < 0xFF)
+               priv->gain.TxMode = 1;
+       else
+               priv->gain.TxMode = 0;
+
+       hostif_sme_enqueue(priv, SME_SET_GAIN);
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get tx gain control value */
+static int ks_wlan_get_tx_gain(struct net_device *dev,
+                              struct iw_request_info *info, __u32 * uwrq,
+                              char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = priv->gain.TxGain;
+       hostif_sme_enqueue(priv, SME_GET_GAIN);
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : set rx gain control value */
+static int ks_wlan_set_rx_gain(struct net_device *dev,
+                              struct iw_request_info *info, __u32 * uwrq,
+                              char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       if (*uwrq >= 0 && *uwrq <= 0xFF)        /* 0-255 */
+               priv->gain.RxGain = (uint8_t) * uwrq;
+       else
+               return -EINVAL;
+
+       if (priv->gain.RxGain < 0xFF)
+               priv->gain.RxMode = 1;
+       else
+               priv->gain.RxMode = 0;
+
+       hostif_sme_enqueue(priv, SME_SET_GAIN);
+       return 0;
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get rx gain control value */
+static int ks_wlan_get_rx_gain(struct net_device *dev,
+                              struct iw_request_info *info, __u32 * uwrq,
+                              char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       if (priv->sleep_mode == SLP_SLEEP) {
+               return -EPERM;
+       }
+       /* for SLEEP MODE */
+       *uwrq = priv->gain.RxGain;
+       hostif_sme_enqueue(priv, SME_GET_GAIN);
+       return 0;
+}
+
#if 0
/*------------------------------------------------------------------*/
/* Private handler : set region value */
/* NOTE(review): dead code under #if 0; uses legacy dev->priv. */
static int ks_wlan_set_region(struct net_device *dev,
			      struct iw_request_info *info, __u32 * uwrq,
			      char *extra)
{
	struct ks_wlan_private *priv = (struct ks_wlan_private *)dev->priv;

	if (priv->sleep_mode == SLP_SLEEP) {
		return -EPERM;
	}
	/* for SLEEP MODE */
	if (*uwrq >= 0x9 && *uwrq <= 0xF)	/* 0x9-0xf */
		priv->region = (uint8_t) * uwrq;
	else
		return -EINVAL;

	hostif_sme_enqueue(priv, SME_SET_REGION);
	return 0;
}
#endif
+
+/*------------------------------------------------------------------*/
+/* Private handler : get eeprom checksum result */
+static int ks_wlan_get_eeprom_cksum(struct net_device *dev,
+                                   struct iw_request_info *info, __u32 * uwrq,
+                                   char *extra)
+{
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       *uwrq = priv->eeprom_checksum;
+       return 0;
+}
+
+static void print_hif_event(int event)
+{
+
+       switch (event) {
+       case HIF_DATA_REQ:
+               printk("HIF_DATA_REQ\n");
+               break;
+       case HIF_DATA_IND:
+               printk("HIF_DATA_IND\n");
+               break;
+       case HIF_MIB_GET_REQ:
+               printk("HIF_MIB_GET_REQ\n");
+               break;
+       case HIF_MIB_GET_CONF:
+               printk("HIF_MIB_GET_CONF\n");
+               break;
+       case HIF_MIB_SET_REQ:
+               printk("HIF_MIB_SET_REQ\n");
+               break;
+       case HIF_MIB_SET_CONF:
+               printk("HIF_MIB_SET_CONF\n");
+               break;
+       case HIF_POWERMGT_REQ:
+               printk("HIF_POWERMGT_REQ\n");
+               break;
+       case HIF_POWERMGT_CONF:
+               printk("HIF_POWERMGT_CONF\n");
+               break;
+       case HIF_START_REQ:
+               printk("HIF_START_REQ\n");
+               break;
+       case HIF_START_CONF:
+               printk("HIF_START_CONF\n");
+               break;
+       case HIF_CONNECT_IND:
+               printk("HIF_CONNECT_IND\n");
+               break;
+       case HIF_STOP_REQ:
+               printk("HIF_STOP_REQ\n");
+               break;
+       case HIF_STOP_CONF:
+               printk("HIF_STOP_CONF\n");
+               break;
+       case HIF_PS_ADH_SET_REQ:
+               printk("HIF_PS_ADH_SET_REQ\n");
+               break;
+       case HIF_PS_ADH_SET_CONF:
+               printk("HIF_PS_ADH_SET_CONF\n");
+               break;
+       case HIF_INFRA_SET_REQ:
+               printk("HIF_INFRA_SET_REQ\n");
+               break;
+       case HIF_INFRA_SET_CONF:
+               printk("HIF_INFRA_SET_CONF\n");
+               break;
+       case HIF_ADH_SET_REQ:
+               printk("HIF_ADH_SET_REQ\n");
+               break;
+       case HIF_ADH_SET_CONF:
+               printk("HIF_ADH_SET_CONF\n");
+               break;
+       case HIF_AP_SET_REQ:
+               printk("HIF_AP_SET_REQ\n");
+               break;
+       case HIF_AP_SET_CONF:
+               printk("HIF_AP_SET_CONF\n");
+               break;
+       case HIF_ASSOC_INFO_IND:
+               printk("HIF_ASSOC_INFO_IND\n");
+               break;
+       case HIF_MIC_FAILURE_REQ:
+               printk("HIF_MIC_FAILURE_REQ\n");
+               break;
+       case HIF_MIC_FAILURE_CONF:
+               printk("HIF_MIC_FAILURE_CONF\n");
+               break;
+       case HIF_SCAN_REQ:
+               printk("HIF_SCAN_REQ\n");
+               break;
+       case HIF_SCAN_CONF:
+               printk("HIF_SCAN_CONF\n");
+               break;
+       case HIF_PHY_INFO_REQ:
+               printk("HIF_PHY_INFO_REQ\n");
+               break;
+       case HIF_PHY_INFO_CONF:
+               printk("HIF_PHY_INFO_CONF\n");
+               break;
+       case HIF_SLEEP_REQ:
+               printk("HIF_SLEEP_REQ\n");
+               break;
+       case HIF_SLEEP_CONF:
+               printk("HIF_SLEEP_CONF\n");
+               break;
+       case HIF_PHY_INFO_IND:
+               printk("HIF_PHY_INFO_IND\n");
+               break;
+       case HIF_SCAN_IND:
+               printk("HIF_SCAN_IND\n");
+               break;
+       case HIF_INFRA_SET2_REQ:
+               printk("HIF_INFRA_SET2_REQ\n");
+               break;
+       case HIF_INFRA_SET2_CONF:
+               printk("HIF_INFRA_SET2_CONF\n");
+               break;
+       case HIF_ADH_SET2_REQ:
+               printk("HIF_ADH_SET2_REQ\n");
+               break;
+       case HIF_ADH_SET2_CONF:
+               printk("HIF_ADH_SET2_CONF\n");
+       }
+}
+
+/*------------------------------------------------------------------*/
+/* Private handler : get host command history */
+static int ks_wlan_hostt(struct net_device *dev, struct iw_request_info *info,
+                        __u32 * uwrq, char *extra)
+{
+       int i, event;
+       struct ks_wlan_private *priv =
+           (struct ks_wlan_private *)netdev_priv(dev);
+
+       for (i = 63; i >= 0; i--) {
+               event =
+                   priv->hostt.buff[(priv->hostt.qtail - 1 - i) %
+                                    SME_EVENT_BUFF_SIZE];
+               print_hif_event(event);
+       }
+       return 0;
+}
+
/* Structures to export the Wireless Handlers */

/* iwpriv sub-ioctl table: maps each KS_WLAN_* private command to its
 * argument encoding and the name shown by "iwpriv <if>". */
static const struct iw_priv_args ks_wlan_private_args[] = {
/*{ cmd, set_args, get_args, name[16] } */
	{KS_WLAN_GET_FIRM_VERSION, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_CHAR | (128 + 1), "GetFirmwareVer"},
#ifdef WPS
	{KS_WLAN_SET_WPS_ENABLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetWPSEnable"},
	/* NOTE(review): "GetW" looks truncated — presumably meant
	 * "GetWPSEnable" (which would fit the 16-char limit); confirm. */
	{KS_WLAN_GET_WPS_ENABLE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetW"},
	{KS_WLAN_SET_WPS_PROBE_REQ, IW_PRIV_TYPE_BYTE | 2047, IW_PRIV_TYPE_NONE,
	 "SetWPSProbeReq"},
#endif /* WPS */
	{KS_WLAN_SET_PREAMBLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetPreamble"},
	{KS_WLAN_GET_PREAMBLE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPreamble"},
	{KS_WLAN_SET_POWER_SAVE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetPowerSave"},
	{KS_WLAN_GET_POWER_SAVE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPowerSave"},
	{KS_WLAN_SET_SCAN_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetScanType"},
	{KS_WLAN_GET_SCAN_TYPE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetScanType"},
	{KS_WLAN_SET_RX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetRxGain"},
	{KS_WLAN_GET_RX_GAIN, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetRxGain"},
	{KS_WLAN_HOSTT, IW_PRIV_TYPE_NONE, IW_PRIV_TYPE_CHAR | (128 + 1),
	 "hostt"},
	{KS_WLAN_SET_BEACON_LOST, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetBeaconLost"},
	{KS_WLAN_GET_BEACON_LOST, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetBeaconLost"},
	{KS_WLAN_SET_SLEEP_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetSleepMode"},
	{KS_WLAN_GET_SLEEP_MODE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetSleepMode"},
	{KS_WLAN_SET_TX_GAIN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetTxGain"},
	{KS_WLAN_GET_TX_GAIN, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetTxGain"},
	{KS_WLAN_SET_PHY_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetPhyType"},
	{KS_WLAN_GET_PHY_TYPE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetPhyType"},
	{KS_WLAN_SET_CTS_MODE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
	 IW_PRIV_TYPE_NONE, "SetCtsMode"},
	{KS_WLAN_GET_CTS_MODE, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetCtsMode"},
	{KS_WLAN_GET_EEPROM_CKSUM, IW_PRIV_TYPE_NONE,
	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "GetChecksum"},
};
+
+static const iw_handler ks_wlan_handler[] = {
+       (iw_handler) ks_wlan_config_commit,     /* SIOCSIWCOMMIT */
+       (iw_handler) ks_wlan_get_name,  /* SIOCGIWNAME */
+       (iw_handler) NULL,      /* SIOCSIWNWID */
+       (iw_handler) NULL,      /* SIOCGIWNWID */
+       (iw_handler) ks_wlan_set_freq,  /* SIOCSIWFREQ */
+       (iw_handler) ks_wlan_get_freq,  /* SIOCGIWFREQ */
+       (iw_handler) ks_wlan_set_mode,  /* SIOCSIWMODE */
+       (iw_handler) ks_wlan_get_mode,  /* SIOCGIWMODE */
+#ifndef KSC_OPNOTSUPP
+       (iw_handler) ks_wlan_set_sens,  /* SIOCSIWSENS */
+       (iw_handler) ks_wlan_get_sens,  /* SIOCGIWSENS */
+#else /* KSC_OPNOTSUPP */
+       (iw_handler) NULL,      /* SIOCSIWSENS */
+       (iw_handler) NULL,      /* SIOCGIWSENS */
+#endif /* KSC_OPNOTSUPP */
+       (iw_handler) NULL,      /* SIOCSIWRANGE */
+       (iw_handler) ks_wlan_get_range, /* SIOCGIWRANGE */
+       (iw_handler) NULL,      /* SIOCSIWPRIV */
+       (iw_handler) NULL,      /* SIOCGIWPRIV */
+       (iw_handler) NULL,      /* SIOCSIWSTATS */
+       (iw_handler) ks_wlan_get_iwstats,       /* SIOCGIWSTATS */
+       (iw_handler) NULL,      /* SIOCSIWSPY */
+       (iw_handler) NULL,      /* SIOCGIWSPY */
+       (iw_handler) NULL,      /* SIOCSIWTHRSPY */
+       (iw_handler) NULL,      /* SIOCGIWTHRSPY */
+       (iw_handler) ks_wlan_set_wap,   /* SIOCSIWAP */
+       (iw_handler) ks_wlan_get_wap,   /* SIOCGIWAP */
+//      (iw_handler) NULL,                      /* SIOCSIWMLME */
+       (iw_handler) ks_wlan_set_mlme,  /* SIOCSIWMLME */
+       (iw_handler) ks_wlan_get_aplist,        /* SIOCGIWAPLIST */
+       (iw_handler) ks_wlan_set_scan,  /* SIOCSIWSCAN */
+       (iw_handler) ks_wlan_get_scan,  /* SIOCGIWSCAN */
+       (iw_handler) ks_wlan_set_essid, /* SIOCSIWESSID */
+       (iw_handler) ks_wlan_get_essid, /* SIOCGIWESSID */
+       (iw_handler) ks_wlan_set_nick,  /* SIOCSIWNICKN */
+       (iw_handler) ks_wlan_get_nick,  /* SIOCGIWNICKN */
+       (iw_handler) NULL,      /* -- hole -- */
+       (iw_handler) NULL,      /* -- hole -- */
+       (iw_handler) ks_wlan_set_rate,  /* SIOCSIWRATE */
+       (iw_handler) ks_wlan_get_rate,  /* SIOCGIWRATE */
+       (iw_handler) ks_wlan_set_rts,   /* SIOCSIWRTS */
+       (iw_handler) ks_wlan_get_rts,   /* SIOCGIWRTS */
+       (iw_handler) ks_wlan_set_frag,  /* SIOCSIWFRAG */
+       (iw_handler) ks_wlan_get_frag,  /* SIOCGIWFRAG */
+#ifndef KSC_OPNOTSUPP
+       (iw_handler) ks_wlan_set_txpow, /* SIOCSIWTXPOW */
+       (iw_handler) ks_wlan_get_txpow, /* SIOCGIWTXPOW */
+       (iw_handler) ks_wlan_set_retry, /* SIOCSIWRETRY */
+       (iw_handler) ks_wlan_get_retry, /* SIOCGIWRETRY */
+#else /* KSC_OPNOTSUPP */
+       (iw_handler) NULL,      /* SIOCSIWTXPOW */
+       (iw_handler) NULL,      /* SIOCGIWTXPOW */
+       (iw_handler) NULL,      /* SIOCSIWRETRY */
+       (iw_handler) NULL,      /* SIOCGIWRETRY */
+#endif /* KSC_OPNOTSUPP */
+       (iw_handler) ks_wlan_set_encode,        /* SIOCSIWENCODE */
+       (iw_handler) ks_wlan_get_encode,        /* SIOCGIWENCODE */
+       (iw_handler) ks_wlan_set_power, /* SIOCSIWPOWER */
+       (iw_handler) ks_wlan_get_power, /* SIOCGIWPOWER */
+       (iw_handler) NULL,      /* -- hole -- */
+       (iw_handler) NULL,      /* -- hole -- */
+//      (iw_handler) NULL,                      /* SIOCSIWGENIE */
+       (iw_handler) ks_wlan_set_genie, /* SIOCSIWGENIE */
+       (iw_handler) NULL,      /* SIOCGIWGENIE */
+       (iw_handler) ks_wlan_set_auth_mode,     /* SIOCSIWAUTH */
+       (iw_handler) ks_wlan_get_auth_mode,     /* SIOCGIWAUTH */
+       (iw_handler) ks_wlan_set_encode_ext,    /* SIOCSIWENCODEEXT */
+       (iw_handler) ks_wlan_get_encode_ext,    /* SIOCGIWENCODEEXT */
+       (iw_handler) ks_wlan_set_pmksa, /* SIOCSIWPMKSA */
+       (iw_handler) NULL,      /* -- hole -- */
+};
+
+/* private_handler */
+static const iw_handler ks_wlan_private_handler[] = {
+       (iw_handler) NULL,      /*  0 */
+       (iw_handler) NULL,      /*  1, used to be: KS_WLAN_GET_DRIVER_VERSION */
+       (iw_handler) NULL,      /*  2 */
+       (iw_handler) ks_wlan_get_firmware_version,      /*  3 KS_WLAN_GET_FIRM_VERSION */
+#ifdef WPS
+       (iw_handler) ks_wlan_set_wps_enable,    /*  4 KS_WLAN_SET_WPS_ENABLE  */
+       (iw_handler) ks_wlan_get_wps_enable,    /*  5 KS_WLAN_GET_WPS_ENABLE  */
+       (iw_handler) ks_wlan_set_wps_probe_req, /*  6 KS_WLAN_SET_WPS_PROBE_REQ */
+#else
+       (iw_handler) NULL,      /*  4 */
+       (iw_handler) NULL,      /*  5 */
+       (iw_handler) NULL,      /*  6 */
+#endif /* WPS */
+
+       (iw_handler) ks_wlan_get_eeprom_cksum,  /*  7 KS_WLAN_GET_CONNECT */
+       (iw_handler) ks_wlan_set_preamble,      /*  8 KS_WLAN_SET_PREAMBLE */
+       (iw_handler) ks_wlan_get_preamble,      /*  9 KS_WLAN_GET_PREAMBLE */
+       (iw_handler) ks_wlan_set_powermgt,      /* 10 KS_WLAN_SET_POWER_SAVE */
+       (iw_handler) ks_wlan_get_powermgt,      /* 11 KS_WLAN_GET_POWER_SAVE */
+       (iw_handler) ks_wlan_set_scan_type,     /* 12 KS_WLAN_SET_SCAN_TYPE */
+       (iw_handler) ks_wlan_get_scan_type,     /* 13 KS_WLAN_GET_SCAN_TYPE */
+       (iw_handler) ks_wlan_set_rx_gain,       /* 14 KS_WLAN_SET_RX_GAIN */
+       (iw_handler) ks_wlan_get_rx_gain,       /* 15 KS_WLAN_GET_RX_GAIN */
+       (iw_handler) ks_wlan_hostt,     /* 16 KS_WLAN_HOSTT */
+       (iw_handler) NULL,      /* 17 */
+       (iw_handler) ks_wlan_set_beacon_lost,   /* 18 KS_WLAN_SET_BECAN_LOST */
+       (iw_handler) ks_wlan_get_beacon_lost,   /* 19 KS_WLAN_GET_BECAN_LOST */
+       (iw_handler) ks_wlan_set_tx_gain,       /* 20 KS_WLAN_SET_TX_GAIN */
+       (iw_handler) ks_wlan_get_tx_gain,       /* 21 KS_WLAN_GET_TX_GAIN */
+       (iw_handler) ks_wlan_set_phy_type,      /* 22 KS_WLAN_SET_PHY_TYPE */
+       (iw_handler) ks_wlan_get_phy_type,      /* 23 KS_WLAN_GET_PHY_TYPE */
+       (iw_handler) ks_wlan_set_cts_mode,      /* 24 KS_WLAN_SET_CTS_MODE */
+       (iw_handler) ks_wlan_get_cts_mode,      /* 25 KS_WLAN_GET_CTS_MODE */
+       (iw_handler) NULL,      /* 26 */
+       (iw_handler) NULL,      /* 27 */
+       (iw_handler) ks_wlan_set_sleep_mode,    /* 28 KS_WLAN_SET_SLEEP_MODE */
+       (iw_handler) ks_wlan_get_sleep_mode,    /* 29 KS_WLAN_GET_SLEEP_MODE */
+       (iw_handler) NULL,      /* 30 */
+       (iw_handler) NULL,      /* 31 */
+};
+
+static const struct iw_handler_def ks_wlan_handler_def = {
+       .num_standard = sizeof(ks_wlan_handler) / sizeof(iw_handler),
+       .num_private = sizeof(ks_wlan_private_handler) / sizeof(iw_handler),
+       .num_private_args =
+           sizeof(ks_wlan_private_args) / sizeof(struct iw_priv_args),
+       .standard = (iw_handler *) ks_wlan_handler,
+       .private = (iw_handler *) ks_wlan_private_handler,
+       .private_args = (struct iw_priv_args *)ks_wlan_private_args,
+       .get_wireless_stats = ks_get_wireless_stats,
+};
+
+static int ks_wlan_netdev_ioctl(struct net_device *dev, struct ifreq *rq,
+                               int cmd)
+{
+       int rc = 0;
+       struct iwreq *wrq = (struct iwreq *)rq;
+       switch (cmd) {
+       case SIOCIWFIRSTPRIV + 20:      /* KS_WLAN_SET_STOP_REQ */
+               rc = ks_wlan_set_stop_request(dev, NULL, &(wrq->u.mode), NULL);
+               break;
+               // All other calls are currently unsupported
+       default:
+               rc = -EOPNOTSUPP;
+       }
+
+       DPRINTK(5, "return=%d\n", rc);
+       return rc;
+}
+
+static
+struct net_device_stats *ks_wlan_get_stats(struct net_device *dev)
+{
+       struct ks_wlan_private *priv = netdev_priv(dev);
+
+       if (priv->dev_state < DEVICE_STATE_READY) {
+               return NULL;    /* not finished initialize */
+       }
+
+       return &priv->nstats;
+}
+
+static
+int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
+{
+       struct ks_wlan_private *priv = netdev_priv(dev);
+       struct sockaddr *mac_addr = (struct sockaddr *)addr;
+       if (netif_running(dev))
+               return -EBUSY;
+       memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
+       memcpy(priv->eth_addr, mac_addr->sa_data, ETH_ALEN);
+
+       priv->mac_address_valid = 0;
+       hostif_sme_enqueue(priv, SME_MACADDRESS_SET_REQUEST);
+       printk(KERN_INFO
+              "ks_wlan: MAC ADDRESS = %02x:%02x:%02x:%02x:%02x:%02x\n",
+              priv->eth_addr[0], priv->eth_addr[1], priv->eth_addr[2],
+              priv->eth_addr[3], priv->eth_addr[4], priv->eth_addr[5]);
+       return 0;
+}
+
+static
+void ks_wlan_tx_timeout(struct net_device *dev)
+{
+       struct ks_wlan_private *priv = netdev_priv(dev);
+
+       DPRINTK(1, "head(%d) tail(%d)!!\n", priv->tx_dev.qhead,
+               priv->tx_dev.qtail);
+       if (!netif_queue_stopped(dev)) {
+               netif_stop_queue(dev);
+       }
+       priv->nstats.tx_errors++;
+       netif_wake_queue(dev);
+
+       return;
+}
+
+static
+int ks_wlan_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct ks_wlan_private *priv = netdev_priv(dev);
+       int rc = 0;
+
+       DPRINTK(3, "in_interrupt()=%ld\n", in_interrupt());
+
+       if (skb == NULL) {
+               printk(KERN_ERR "ks_wlan:  skb == NULL!!!\n");
+               return 0;
+       }
+       if (priv->dev_state < DEVICE_STATE_READY) {
+               dev_kfree_skb(skb);
+               return 0;       /* not finished initialize */
+       }
+
+       if (netif_running(dev))
+               netif_stop_queue(dev);
+
+       rc = hostif_data_request(priv, skb);
+       netif_trans_update(dev);
+
+       DPRINTK(4, "rc=%d\n", rc);
+       if (rc) {
+               rc = 0;
+       }
+
+       return rc;
+}
+
+void send_packet_complete(void *arg1, void *arg2)
+{
+       struct ks_wlan_private *priv = (struct ks_wlan_private *)arg1;
+       struct sk_buff *packet = (struct sk_buff *)arg2;
+
+       DPRINTK(3, "\n");
+
+       priv->nstats.tx_bytes += packet->len;
+       priv->nstats.tx_packets++;
+
+       if (netif_queue_stopped(priv->net_dev))
+               netif_wake_queue(priv->net_dev);
+
+       if (packet) {
+               dev_kfree_skb(packet);
+               packet = NULL;
+       }
+
+}
+
+/* Set or clear the multicast filter for this adaptor.
+   This routine is not state sensitive and need not be SMP locked. */
+static
+void ks_wlan_set_multicast_list(struct net_device *dev)
+{
+       struct ks_wlan_private *priv = netdev_priv(dev);
+
+       DPRINTK(4, "\n");
+       if (priv->dev_state < DEVICE_STATE_READY) {
+               return; /* not finished initialize */
+       }
+       hostif_sme_enqueue(priv, SME_MULTICAST_REQUEST);
+
+       return;
+}
+
+static
+int ks_wlan_open(struct net_device *dev)
+{
+       struct ks_wlan_private *priv = netdev_priv(dev);
+
+       priv->cur_rx = 0;
+
+       if (!priv->mac_address_valid) {
+               printk(KERN_ERR "ks_wlan : %s Not READY !!\n", dev->name);
+               return -EBUSY;
+       } else
+               netif_start_queue(dev);
+
+       return 0;
+}
+
+static
+int ks_wlan_close(struct net_device *dev)
+{
+
+       netif_stop_queue(dev);
+
+       DPRINTK(4, "%s: Shutting down ethercard, status was 0x%4.4x.\n",
+               dev->name, 0x00);
+
+       return 0;
+}
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (3*HZ)
+static const unsigned char dummy_addr[] =
+    { 0x00, 0x0b, 0xe3, 0x00, 0x00, 0x00 };
+
+static const struct net_device_ops ks_wlan_netdev_ops = {
+       .ndo_start_xmit = ks_wlan_start_xmit,
+       .ndo_open = ks_wlan_open,
+       .ndo_stop = ks_wlan_close,
+       .ndo_do_ioctl = ks_wlan_netdev_ioctl,
+       .ndo_set_mac_address = ks_wlan_set_mac_address,
+       .ndo_get_stats = ks_wlan_get_stats,
+       .ndo_tx_timeout = ks_wlan_tx_timeout,
+       .ndo_set_rx_mode = ks_wlan_set_multicast_list,
+};
+
+int ks_wlan_net_start(struct net_device *dev)
+{
+       struct ks_wlan_private *priv;
+       /* int rc; */
+
+       priv = netdev_priv(dev);
+       priv->mac_address_valid = 0;
+       priv->need_commit = 0;
+
+       priv->device_open_status = 1;
+
+       /* phy information update timer */
+       atomic_set(&update_phyinfo, 0);
+       init_timer(&update_phyinfo_timer);
+       update_phyinfo_timer.function = ks_wlan_update_phyinfo_timeout;
+       update_phyinfo_timer.data = (unsigned long)priv;
+
+       /* dummy address set */
+       memcpy(priv->eth_addr, dummy_addr, ETH_ALEN);
+       dev->dev_addr[0] = priv->eth_addr[0];
+       dev->dev_addr[1] = priv->eth_addr[1];
+       dev->dev_addr[2] = priv->eth_addr[2];
+       dev->dev_addr[3] = priv->eth_addr[3];
+       dev->dev_addr[4] = priv->eth_addr[4];
+       dev->dev_addr[5] = priv->eth_addr[5];
+       dev->dev_addr[6] = 0x00;
+       dev->dev_addr[7] = 0x00;
+
+       /* The ks_wlan-specific entries in the device structure. */
+       dev->netdev_ops = &ks_wlan_netdev_ops;
+       dev->wireless_handlers = (struct iw_handler_def *)&ks_wlan_handler_def;
+       dev->watchdog_timeo = TX_TIMEOUT;
+
+       netif_carrier_off(dev);
+
+       return 0;
+}
+
+int ks_wlan_net_stop(struct net_device *dev)
+{
+       struct ks_wlan_private *priv = netdev_priv(dev);
+
+       int ret = 0;
+       priv->device_open_status = 0;
+       del_timer_sync(&update_phyinfo_timer);
+
+       if (netif_running(dev))
+               netif_stop_queue(dev);
+
+       return ret;
+}
+
/* Device reset hook: currently a no-op that reports success. */
int ks_wlan_reset(struct net_device *dev)
{
	return 0;
}
diff --git a/drivers/staging/ks7010/michael_mic.c b/drivers/staging/ks7010/michael_mic.c
new file mode 100644 (file)
index 0000000..e14c109
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ *   Driver for KeyStream wireless LAN
+ *
+ *   Copyright (C) 2005-2008 KeyStream Corp.
+ *   Copyright (C) 2009 Renesas Technology Corp.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License version 2 as
+ *   published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include "michael_mic.h"
+
/* Rotation helpers on 32-bit values. */
#define ROL32(A, n) \
	(((A) << (n)) | (((A) >> (32 - (n))) & ((1UL << (n)) - 1)))
#define ROR32(A, n)	ROL32((A), 32 - (n))

/*
 * Convert from byte[] to uint32 in a portable (little-endian) way.
 * Fixes vs. the original: arguments and the whole expansion are fully
 * parenthesized (the cast used to bind only to the first term), and
 * each byte is widened to uint32_t before shifting so that shifting a
 * byte >= 0x80 left by 24 cannot overflow a signed int.
 */
#define getUInt32(A, B)						\
	(((uint32_t)(A)[(B) + 0] << 0) |			\
	 ((uint32_t)(A)[(B) + 1] << 8) |			\
	 ((uint32_t)(A)[(B) + 2] << 16) |			\
	 ((uint32_t)(A)[(B) + 3] << 24))

/*
 * Convert from uint32 to byte[] in a portable (little-endian) way.
 * Wrapped in do { } while (0) so the multi-statement macro behaves as
 * a single statement in all contexts.
 */
#define putUInt32(A, B, C)					\
do {								\
	(A)[(B) + 0] = (uint8_t)((C) & 0xff);			\
	(A)[(B) + 1] = (uint8_t)(((C) >> 8) & 0xff);		\
	(A)[(B) + 2] = (uint8_t)(((C) >> 16) & 0xff);		\
	(A)[(B) + 3] = (uint8_t)(((C) >> 24) & 0xff);		\
} while (0)

/* Reset the state to the empty message: L = K0, R = K1. */
#define MichaelClear(A)						\
do {								\
	(A)->L = (A)->K0;					\
	(A)->R = (A)->K1;					\
	(A)->nBytesInM = 0;					\
} while (0)
+
+static
+void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t * key)
+{
+       // Set the key
+       Mic->K0 = getUInt32(key, 0);
+       Mic->K1 = getUInt32(key, 4);
+
+       //clear();
+       MichaelClear(Mic);
+}
+
/*
 * Michael block function b(L, R): four rounds of rotate/swap/add
 * mixing applied to the two 32-bit state words.
 */
#define MichaelBlockFunction(L, R)					\
do {									\
	(R) ^= ROL32((L), 17);						\
	(L) += (R);							\
	(R) ^= (((L) & 0xff00ff00) >> 8) | (((L) & 0x00ff00ff) << 8);	\
	(L) += (R);							\
	(R) ^= ROL32((L), 3);						\
	(L) += (R);							\
	(R) ^= ROR32((L), 2);						\
	(L) += (R);							\
} while (0)
+
+static
+void MichaelAppend(struct michel_mic_t *Mic, uint8_t * src, int nBytes)
+{
+       int addlen;
+       if (Mic->nBytesInM) {
+               addlen = 4 - Mic->nBytesInM;
+               if (addlen > nBytes)
+                       addlen = nBytes;
+               memcpy(&Mic->M[Mic->nBytesInM], src, addlen);
+               Mic->nBytesInM += addlen;
+               src += addlen;
+               nBytes -= addlen;
+
+               if (Mic->nBytesInM < 4)
+                       return;
+
+               Mic->L ^= getUInt32(Mic->M, 0);
+               MichaelBlockFunction(Mic->L, Mic->R);
+               Mic->nBytesInM = 0;
+       }
+
+       while (nBytes >= 4) {
+               Mic->L ^= getUInt32(src, 0);
+               MichaelBlockFunction(Mic->L, Mic->R);
+               src += 4;
+               nBytes -= 4;
+       }
+
+       if (nBytes > 0) {
+               Mic->nBytesInM = nBytes;
+               memcpy(Mic->M, src, nBytes);
+       }
+}
+
+static
+void MichaelGetMIC(struct michel_mic_t *Mic, uint8_t * dst)
+{
+       uint8_t *data = Mic->M;
+       switch (Mic->nBytesInM) {
+       case 0:
+               Mic->L ^= 0x5a;
+               break;
+       case 1:
+               Mic->L ^= data[0] | 0x5a00;
+               break;
+       case 2:
+               Mic->L ^= data[0] | (data[1] << 8) | 0x5a0000;
+               break;
+       case 3:
+               Mic->L ^= data[0] | (data[1] << 8) | (data[2] << 16) |
+                   0x5a000000;
+               break;
+       }
+       MichaelBlockFunction(Mic->L, Mic->R);
+       MichaelBlockFunction(Mic->L, Mic->R);
+       // The appendByte function has already computed the result.
+       putUInt32(dst, 0, Mic->L);
+       putUInt32(dst, 4, Mic->R);
+
+       // Reset to the empty message.
+       MichaelClear(Mic);
+}
+
/*
 * Compute the TKIP Michael MIC over DA|SA|Priority|0|0|0|Data.
 *
 * IEEE802.11i  page 47
 * Figure 43g TKIP MIC processing format
 * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
 * |6 |6 |1       |3 |M   |1 |1 |1 |1 |1 |1 |1 |1 | Octet
 * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
 * |DA|SA|Priority|0 |Data|M0|M1|M2|M3|M4|M5|M6|M7|
 * +--+--+--------+--+----+--+--+--+--+--+--+--+--+
 */
void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
			uint8_t *Data, int Len, uint8_t priority,
			uint8_t *Result)
{
	uint8_t pad_data[4] = { priority, 0, 0, 0 };

	MichaelInitializeFunction(Mic, Key);
	MichaelAppend(Mic, Data, 12);			/* |DA|SA| */
	MichaelAppend(Mic, pad_data, 4);		/* |Priority|0|0|0| */
	MichaelAppend(Mic, Data + 12, Len - 12);	/* |Data| */
	MichaelGetMIC(Mic, Result);
}
diff --git a/drivers/staging/ks7010/michael_mic.h b/drivers/staging/ks7010/michael_mic.h
new file mode 100644 (file)
index 0000000..c7e4eb2
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ *   Driver for KeyStream wireless LAN
+ *
+ *   Copyright (C) 2005-2008 KeyStream Corp.
+ *   Copyright (C) 2009 Renesas Technology Corp.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License version 2 as
+ *   published by the Free Software Foundation.
+ */
+
/* Michael MIC state and entry point. */
struct michel_mic_t {
	uint32_t K0;		/* first 32-bit half of the key */
	uint32_t K1;		/* second 32-bit half of the key */
	uint32_t L;		/* current state */
	uint32_t R;		/* current state */
	uint8_t M[4];		/* message accumulator (single word) */
	int nBytesInM;		/* number of valid bytes in M */
	/* NOTE(review): Result is not referenced by the functions in
	 * michael_mic.c shown here — verify callers before removing. */
	uint8_t Result[8];
};

extern
void MichaelMICFunction(struct michel_mic_t *Mic, uint8_t *Key,
			uint8_t *Data, int Len, uint8_t priority,
			uint8_t *Result);
index 1edfca58c1c60d0c6e98bb1c510c3822bf543959..be0675d8ff5e00fb689394476c330ed7803b8ebe 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 4141afb101bb21d8a2382e4a6aae38e1c86c2fc8..3f6447c650424f08523f3a12649019f5905ede56 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 455c54d0d17ce19852d8bde42d2ab36b46206048..25adab19fd8665245f72cc3dde33be559e5e6a78 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 2e008bffc89a92eabad55dc96f4e2b030f25416e..d3f9a6020ee3ba1b58d6501125c53c52dfed71bb 100644 (file)
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see http://www.gnu.org/licenses
  *
- * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores,
- * CA 94065 USA or visit www.oracle.com if you need additional information or
- * have any questions.
- *
  * GPL HEADER END
  */
 /*
index 119986bc79612232bd699e130dbfb06ca089b1b7..6949a184663527263ad44c9add33cb7e7fd0897d 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 4b9102bd95d5a85b4c178d8654661e8f28bcee95..cce6b58e3682ed77ab1b96cdade56362573e8ecc 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index ac4e8cfe6c8cdf37a666423ef692c35e7b2694fa..8c75d5075590197529dc7c674ef395c55d7c0d7b 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 2fd2a9690a341b117d4fb3a7f460d3364c1afcd6..4daa3823f60a51f9b24613447c8da106ff6f8ec9 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index e02cde5aeca1f9c54fdcdc6dd0a5e5ed542a31fb..0ee60ff336f2dada891bb7750b5d96c36112c046 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 2c7ec2d28f38fb36dcded7b9b66a75d0d3d2257f..008da4497bda3ff5a495b3d1d1f42f8c7afea27c 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index f9b20c5accbf4c0966247d9ece60ab5dc5d7b05e..a7e1340e69a1d87c050945adf3180a710b2ab96b 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index a268ef7aa19daa9a703623465442fdb76c7be473..e8695e4a39d178b3b9ef1a419c30e11211cb379f 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 7656b09b875234e7fffda84355a64821a0b63795..b646acd1f7e75b5bf150c939457f7bacd9678728 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 6ce9accb91ad384428863b25321b57a1b3c7e5df..dfff17088403ec4712fbfcf65ab44f1b7eacb218 100644 (file)
@@ -35,7 +35,7 @@
 #define MAX_NUM_SHOW_ENTRIES   32
 #define LNET_MAX_STR_LEN       128
 #define LNET_MAX_SHOW_NUM_CPT  128
-#define LNET_UNDEFINED_HOPS    ((__u32) -1)
+#define LNET_UNDEFINED_HOPS    ((__u32)(-1))
 
 struct lnet_ioctl_config_lnd_cmn_tunables {
        __u32 lct_version;
index 24c4a08e6dc6f0495207e16cc35793158d8894d7..7967b013cbae9debaf06b29750937ec00457b6d5 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/kthread.h>
 #include <linux/uio.h>
 #include <linux/types.h>
+#include <linux/completion.h>
 
 #include "types.h"
 #include "lnetctl.h"
@@ -610,7 +611,7 @@ typedef struct {
        /* rcd ready for free */
        struct list_head                  ln_rcd_zombie;
        /* serialise startup/shutdown */
-       struct semaphore                  ln_rc_signal;
+       struct completion                 ln_rc_signal;
 
        struct mutex                      ln_api_mutex;
        struct mutex                      ln_lnd_mutex;
index 1c679cb727859f81e3ef5b79f28445f3119679cb..e098b6c086e102c5f6ad4cec126c17dce2e394f4 100644 (file)
@@ -68,9 +68,9 @@ typedef __u64 lnet_nid_t;
 typedef __u32 lnet_pid_t;
 
 /** wildcard NID that matches any end-point address */
-#define LNET_NID_ANY   ((lnet_nid_t) -1)
+#define LNET_NID_ANY   ((lnet_nid_t)(-1))
 /** wildcard PID that matches any lnet_pid_t */
-#define LNET_PID_ANY   ((lnet_pid_t) -1)
+#define LNET_PID_ANY   ((lnet_pid_t)(-1))
 
 #define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */
 #define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */
index 6c59f2ff222040df485ce7c33de5fd5c97d451c3..4f5978b3767bfba14b7e73be2d879de0a69afed1 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -44,7 +40,7 @@
 
 static lnd_t the_o2iblnd;
 
-kib_data_t kiblnd_data;
+struct kib_data kiblnd_data;
 
 static __u32 kiblnd_cksum(void *ptr, int nob)
 {
@@ -98,40 +94,40 @@ static char *kiblnd_msgtype2str(int type)
 
 static int kiblnd_msgtype2size(int type)
 {
-       const int hdr_size = offsetof(kib_msg_t, ibm_u);
+       const int hdr_size = offsetof(struct kib_msg, ibm_u);
 
        switch (type) {
        case IBLND_MSG_CONNREQ:
        case IBLND_MSG_CONNACK:
-               return hdr_size + sizeof(kib_connparams_t);
+               return hdr_size + sizeof(struct kib_connparams);
 
        case IBLND_MSG_NOOP:
                return hdr_size;
 
        case IBLND_MSG_IMMEDIATE:
-               return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
+               return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]);
 
        case IBLND_MSG_PUT_REQ:
-               return hdr_size + sizeof(kib_putreq_msg_t);
+               return hdr_size + sizeof(struct kib_putreq_msg);
 
        case IBLND_MSG_PUT_ACK:
-               return hdr_size + sizeof(kib_putack_msg_t);
+               return hdr_size + sizeof(struct kib_putack_msg);
 
        case IBLND_MSG_GET_REQ:
-               return hdr_size + sizeof(kib_get_msg_t);
+               return hdr_size + sizeof(struct kib_get_msg);
 
        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
-               return hdr_size + sizeof(kib_completion_msg_t);
+               return hdr_size + sizeof(struct kib_completion_msg);
        default:
                return -1;
        }
 }
 
-static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
+static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
 {
-       kib_rdma_desc_t *rd;
+       struct kib_rdma_desc *rd;
        int nob;
        int n;
        int i;
@@ -156,7 +152,7 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
                return 1;
        }
 
-       nob = offsetof(kib_msg_t, ibm_u) +
+       nob = offsetof(struct kib_msg, ibm_u) +
              kiblnd_rd_msg_size(rd, msg->ibm_type, n);
 
        if (msg->ibm_nob < nob) {
@@ -176,10 +172,10 @@ static int kiblnd_unpack_rd(kib_msg_t *msg, int flip)
        return 0;
 }
 
-void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
+void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version,
                     int credits, lnet_nid_t dstnid, __u64 dststamp)
 {
-       kib_net_t *net = ni->ni_data;
+       struct kib_net *net = ni->ni_data;
 
        /*
         * CAVEAT EMPTOR! all message fields not set here should have been
@@ -202,9 +198,9 @@ void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
        }
 }
 
-int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
+int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
 {
-       const int hdr_size = offsetof(kib_msg_t, ibm_u);
+       const int hdr_size = offsetof(struct kib_msg, ibm_u);
        __u32 msg_cksum;
        __u16 version;
        int msg_nob;
@@ -315,10 +311,10 @@ int kiblnd_unpack_msg(kib_msg_t *msg, int nob)
        return 0;
 }
 
-int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
+int kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid)
 {
-       kib_peer_t *peer;
-       kib_net_t *net = ni->ni_data;
+       struct kib_peer *peer;
+       struct kib_net *net = ni->ni_data;
        int cpt = lnet_cpt_of_nid(nid);
        unsigned long flags;
 
@@ -357,9 +353,9 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
        return 0;
 }
 
-void kiblnd_destroy_peer(kib_peer_t *peer)
+void kiblnd_destroy_peer(struct kib_peer *peer)
 {
-       kib_net_t *net = peer->ibp_ni->ni_data;
+       struct kib_net *net = peer->ibp_ni->ni_data;
 
        LASSERT(net);
        LASSERT(!atomic_read(&peer->ibp_refcount));
@@ -378,7 +374,7 @@ void kiblnd_destroy_peer(kib_peer_t *peer)
        atomic_dec(&net->ibn_npeers);
 }
 
-kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
+struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid)
 {
        /*
         * the caller is responsible for accounting the additional reference
@@ -386,10 +382,10 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
         */
        struct list_head *peer_list = kiblnd_nid2peerlist(nid);
        struct list_head *tmp;
-       kib_peer_t *peer;
+       struct kib_peer *peer;
 
        list_for_each(tmp, peer_list) {
-               peer = list_entry(tmp, kib_peer_t, ibp_list);
+               peer = list_entry(tmp, struct kib_peer, ibp_list);
                LASSERT(!kiblnd_peer_idle(peer));
 
                if (peer->ibp_nid != nid)
@@ -404,7 +400,7 @@ kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid)
        return NULL;
 }
 
-void kiblnd_unlink_peer_locked(kib_peer_t *peer)
+void kiblnd_unlink_peer_locked(struct kib_peer *peer)
 {
        LASSERT(list_empty(&peer->ibp_conns));
 
@@ -417,7 +413,7 @@ void kiblnd_unlink_peer_locked(kib_peer_t *peer)
 static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
                                lnet_nid_t *nidp, int *count)
 {
-       kib_peer_t *peer;
+       struct kib_peer *peer;
        struct list_head *ptmp;
        int i;
        unsigned long flags;
@@ -426,7 +422,7 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
 
        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
                list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
-                       peer = list_entry(ptmp, kib_peer_t, ibp_list);
+                       peer = list_entry(ptmp, struct kib_peer, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer));
 
                        if (peer->ibp_ni != ni)
@@ -448,17 +444,17 @@ static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
        return -ENOENT;
 }
 
-static void kiblnd_del_peer_locked(kib_peer_t *peer)
+static void kiblnd_del_peer_locked(struct kib_peer *peer)
 {
        struct list_head *ctmp;
        struct list_head *cnxt;
-       kib_conn_t *conn;
+       struct kib_conn *conn;
 
        if (list_empty(&peer->ibp_conns)) {
                kiblnd_unlink_peer_locked(peer);
        } else {
                list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
-                       conn = list_entry(ctmp, kib_conn_t, ibc_list);
+                       conn = list_entry(ctmp, struct kib_conn, ibc_list);
 
                        kiblnd_close_conn_locked(conn, 0);
                }
@@ -475,7 +471,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
        LIST_HEAD(zombies);
        struct list_head *ptmp;
        struct list_head *pnxt;
-       kib_peer_t *peer;
+       struct kib_peer *peer;
        int lo;
        int hi;
        int i;
@@ -494,7 +490,7 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
 
        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
-                       peer = list_entry(ptmp, kib_peer_t, ibp_list);
+                       peer = list_entry(ptmp, struct kib_peer, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer));
 
                        if (peer->ibp_ni != ni)
@@ -522,11 +518,11 @@ static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
        return rc;
 }
 
-static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
+static struct kib_conn *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
 {
-       kib_peer_t *peer;
+       struct kib_peer *peer;
        struct list_head *ptmp;
-       kib_conn_t *conn;
+       struct kib_conn *conn;
        struct list_head *ctmp;
        int i;
        unsigned long flags;
@@ -535,7 +531,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
 
        for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
                list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
-                       peer = list_entry(ptmp, kib_peer_t, ibp_list);
+                       peer = list_entry(ptmp, struct kib_peer, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer));
 
                        if (peer->ibp_ni != ni)
@@ -545,7 +541,7 @@ static kib_conn_t *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
                                if (index-- > 0)
                                        continue;
 
-                               conn = list_entry(ctmp, kib_conn_t,
+                               conn = list_entry(ctmp, struct kib_conn,
                                                  ibc_list);
                                kiblnd_conn_addref(conn);
                                read_unlock_irqrestore(
@@ -594,7 +590,7 @@ static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
                cmid->route.path_rec->mtu = mtu;
 }
 
-static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
+static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
 {
        cpumask_t *mask;
        int vectors;
@@ -621,7 +617,7 @@ static int kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
        return 1;
 }
 
-kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid,
                               int state, int version)
 {
        /*
@@ -634,12 +630,12 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
         * its ref on 'cmid').
         */
        rwlock_t *glock = &kiblnd_data.kib_global_lock;
-       kib_net_t *net = peer->ibp_ni->ni_data;
-       kib_dev_t *dev;
+       struct kib_net *net = peer->ibp_ni->ni_data;
+       struct kib_dev *dev;
        struct ib_qp_init_attr *init_qp_attr;
        struct kib_sched_info *sched;
        struct ib_cq_init_attr cq_attr = {};
-       kib_conn_t *conn;
+       struct kib_conn *conn;
        struct ib_cq *cq;
        unsigned long flags;
        int cpt;
@@ -723,7 +719,7 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
        write_unlock_irqrestore(glock, flags);
 
        LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
-                        IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
+                        IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
        if (!conn->ibc_rxs) {
                CERROR("Cannot allocate RX buffers\n");
                goto failed_2;
@@ -833,10 +829,10 @@ kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
        return NULL;
 }
 
-void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
+void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
 {
        struct rdma_cm_id *cmid = conn->ibc_cmid;
-       kib_peer_t *peer = conn->ibc_peer;
+       struct kib_peer *peer = conn->ibc_peer;
        int rc;
 
        LASSERT(!in_interrupt());
@@ -879,7 +875,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
 
        if (conn->ibc_rxs) {
                LIBCFS_FREE(conn->ibc_rxs,
-                           IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
+                           IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
        }
 
        if (conn->ibc_connvars)
@@ -890,7 +886,7 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
 
        /* See CAVEAT EMPTOR above in kiblnd_create_conn */
        if (conn->ibc_state != IBLND_CONN_INIT) {
-               kib_net_t *net = peer->ibp_ni->ni_data;
+               struct kib_net *net = peer->ibp_ni->ni_data;
 
                kiblnd_peer_decref(peer);
                rdma_destroy_id(cmid);
@@ -900,15 +896,15 @@ void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
        LIBCFS_FREE(conn, sizeof(*conn));
 }
 
-int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
+int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
 {
-       kib_conn_t *conn;
+       struct kib_conn *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int count = 0;
 
        list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
-               conn = list_entry(ctmp, kib_conn_t, ibc_list);
+               conn = list_entry(ctmp, struct kib_conn, ibc_list);
 
                CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
                       libcfs_nid2str(peer->ibp_nid),
@@ -921,16 +917,16 @@ int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
        return count;
 }
 
-int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+int kiblnd_close_stale_conns_locked(struct kib_peer *peer,
                                    int version, __u64 incarnation)
 {
-       kib_conn_t *conn;
+       struct kib_conn *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int count = 0;
 
        list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
-               conn = list_entry(ctmp, kib_conn_t, ibc_list);
+               conn = list_entry(ctmp, struct kib_conn, ibc_list);
 
                if (conn->ibc_version     == version &&
                    conn->ibc_incarnation == incarnation)
@@ -951,7 +947,7 @@ int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
 
 static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
 {
-       kib_peer_t *peer;
+       struct kib_peer *peer;
        struct list_head *ptmp;
        struct list_head *pnxt;
        int lo;
@@ -972,7 +968,7 @@ static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
 
        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
-                       peer = list_entry(ptmp, kib_peer_t, ibp_list);
+                       peer = list_entry(ptmp, struct kib_peer, ibp_list);
                        LASSERT(!kiblnd_peer_idle(peer));
 
                        if (peer->ibp_ni != ni)
@@ -1016,7 +1012,7 @@ static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                break;
        }
        case IOC_LIBCFS_GET_CONN: {
-               kib_conn_t *conn;
+               struct kib_conn *conn;
 
                rc = 0;
                conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
@@ -1052,7 +1048,7 @@ static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
        unsigned long last_alive = 0;
        unsigned long now = cfs_time_current();
        rwlock_t *glock = &kiblnd_data.kib_global_lock;
-       kib_peer_t *peer;
+       struct kib_peer *peer;
        unsigned long flags;
 
        read_lock_irqsave(glock, flags);
@@ -1078,7 +1074,7 @@ static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
               last_alive ? cfs_duration_sec(now - last_alive) : -1);
 }
 
-static void kiblnd_free_pages(kib_pages_t *p)
+static void kiblnd_free_pages(struct kib_pages *p)
 {
        int npages = p->ibp_npages;
        int i;
@@ -1088,22 +1084,22 @@ static void kiblnd_free_pages(kib_pages_t *p)
                        __free_page(p->ibp_pages[i]);
        }
 
-       LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
+       LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
 }
 
-int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
+int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
 {
-       kib_pages_t *p;
+       struct kib_pages *p;
        int i;
 
        LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
-                        offsetof(kib_pages_t, ibp_pages[npages]));
+                        offsetof(struct kib_pages, ibp_pages[npages]));
        if (!p) {
                CERROR("Can't allocate descriptor for %d pages\n", npages);
                return -ENOMEM;
        }
 
-       memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
+       memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
        p->ibp_npages = npages;
 
        for (i = 0; i < npages; i++) {
@@ -1121,9 +1117,9 @@ int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
        return 0;
 }
 
-void kiblnd_unmap_rx_descs(kib_conn_t *conn)
+void kiblnd_unmap_rx_descs(struct kib_conn *conn)
 {
-       kib_rx_t *rx;
+       struct kib_rx *rx;
        int i;
 
        LASSERT(conn->ibc_rxs);
@@ -1145,9 +1141,9 @@ void kiblnd_unmap_rx_descs(kib_conn_t *conn)
        conn->ibc_rx_pages = NULL;
 }
 
-void kiblnd_map_rx_descs(kib_conn_t *conn)
+void kiblnd_map_rx_descs(struct kib_conn *conn)
 {
-       kib_rx_t *rx;
+       struct kib_rx *rx;
        struct page *pg;
        int pg_off;
        int ipg;
@@ -1158,7 +1154,7 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
                rx = &conn->ibc_rxs[i];
 
                rx->rx_conn = conn;
-               rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
+               rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off);
 
                rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
                                                       rx->rx_msg,
@@ -1183,10 +1179,10 @@ void kiblnd_map_rx_descs(kib_conn_t *conn)
        }
 }
 
-static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
+static void kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo)
 {
-       kib_hca_dev_t *hdev = tpo->tpo_hdev;
-       kib_tx_t *tx;
+       struct kib_hca_dev *hdev = tpo->tpo_hdev;
+       struct kib_tx *tx;
        int i;
 
        LASSERT(!tpo->tpo_pool.po_allocated);
@@ -1206,9 +1202,9 @@ static void kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
        tpo->tpo_hdev = NULL;
 }
 
-static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
+static struct kib_hca_dev *kiblnd_current_hdev(struct kib_dev *dev)
 {
-       kib_hca_dev_t *hdev;
+       struct kib_hca_dev *hdev;
        unsigned long flags;
        int i = 0;
 
@@ -1232,14 +1228,14 @@ static kib_hca_dev_t *kiblnd_current_hdev(kib_dev_t *dev)
        return hdev;
 }
 
-static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
+static void kiblnd_map_tx_pool(struct kib_tx_pool *tpo)
 {
-       kib_pages_t *txpgs = tpo->tpo_tx_pages;
-       kib_pool_t *pool = &tpo->tpo_pool;
-       kib_net_t *net = pool->po_owner->ps_net;
-       kib_dev_t *dev;
+       struct kib_pages *txpgs = tpo->tpo_tx_pages;
+       struct kib_pool *pool = &tpo->tpo_pool;
+       struct kib_net *net = pool->po_owner->ps_net;
+       struct kib_dev *dev;
        struct page *page;
-       kib_tx_t *tx;
+       struct kib_tx *tx;
        int page_offset;
        int ipage;
        int i;
@@ -1260,7 +1256,7 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
                page = txpgs->ibp_pages[ipage];
                tx = &tpo->tpo_tx_descs[i];
 
-               tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
+               tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) +
                                           page_offset);
 
                tx->tx_msgaddr = kiblnd_dma_map_single(
@@ -1283,11 +1279,11 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
        }
 }
 
-struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd,
                                    int negotiated_nfrags)
 {
-       kib_net_t *net = ni->ni_data;
-       kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+       struct kib_net *net = ni->ni_data;
+       struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;
        __u16 nfrags;
        int mod;
@@ -1304,7 +1300,7 @@ struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
        return hdev->ibh_mrs;
 }
 
-static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
+static void kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
 {
        LASSERT(!fpo->fpo_map_count);
 
@@ -1335,7 +1331,7 @@ static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
 
 static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
 {
-       kib_fmr_pool_t *fpo, *tmp;
+       struct kib_fmr_pool *fpo, *tmp;
 
        list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
                list_del(&fpo->fpo_list);
@@ -1361,7 +1357,7 @@ kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
        return max(IBLND_FMR_POOL_FLUSH, size);
 }
 
-static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo)
 {
        struct ib_fmr_pool_param param = {
                .max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
@@ -1388,7 +1384,7 @@ static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
        return rc;
 }
 
-static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo)
 {
        struct kib_fast_reg_descriptor *frd, *tmp;
        int i, rc;
@@ -1438,12 +1434,12 @@ out:
        return rc;
 }
 
-static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
-                                 kib_fmr_pool_t **pp_fpo)
+static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
+                                 struct kib_fmr_pool **pp_fpo)
 {
-       kib_dev_t *dev = fps->fps_net->ibn_dev;
+       struct kib_dev *dev = fps->fps_net->ibn_dev;
        struct ib_device_attr *dev_attr;
-       kib_fmr_pool_t *fpo;
+       struct kib_fmr_pool *fpo;
        int rc;
 
        LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
@@ -1488,7 +1484,7 @@ out_fpo:
        return rc;
 }
 
-static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
+static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps,
                                    struct list_head *zombies)
 {
        if (!fps->fps_net) /* intialized? */
@@ -1497,8 +1493,8 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
        spin_lock(&fps->fps_lock);
 
        while (!list_empty(&fps->fps_pool_list)) {
-               kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
-                                                kib_fmr_pool_t, fpo_list);
+               struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next,
+                                                struct kib_fmr_pool, fpo_list);
                fpo->fpo_failed = 1;
                list_del(&fpo->fpo_list);
                if (!fpo->fpo_map_count)
@@ -1510,7 +1506,7 @@ static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
        spin_unlock(&fps->fps_lock);
 }
 
-static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
+static void kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps)
 {
        if (fps->fps_net) { /* initialized? */
                kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
@@ -1519,11 +1515,11 @@ static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
 }
 
 static int
-kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
-                       kib_net_t *net,
+kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts,
+                       struct kib_net *net,
                        struct lnet_ioctl_config_o2iblnd_tunables *tunables)
 {
-       kib_fmr_pool_t *fpo;
+       struct kib_fmr_pool *fpo;
        int rc;
 
        memset(fps, 0, sizeof(*fps));
@@ -1546,7 +1542,7 @@ kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
        return rc;
 }
 
-static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
+static int kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, unsigned long now)
 {
        if (fpo->fpo_map_count) /* still in use */
                return 0;
@@ -1556,10 +1552,10 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
 }
 
 static int
-kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
+kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
 {
        __u64 *pages = tx->tx_pages;
-       kib_hca_dev_t *hdev;
+       struct kib_hca_dev *hdev;
        int npages;
        int size;
        int i;
@@ -1577,13 +1573,13 @@ kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
        return npages;
 }
 
-void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
+void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
 {
        LIST_HEAD(zombies);
-       kib_fmr_pool_t *fpo = fmr->fmr_pool;
-       kib_fmr_poolset_t *fps;
+       struct kib_fmr_pool *fpo = fmr->fmr_pool;
+       struct kib_fmr_poolset *fps;
        unsigned long now = cfs_time_current();
-       kib_fmr_pool_t *tmp;
+       struct kib_fmr_pool *tmp;
        int rc;
 
        if (!fpo)
@@ -1633,14 +1629,14 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
                kiblnd_destroy_fmr_pool_list(&zombies);
 }
 
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
-                       kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
-                       kib_fmr_t *fmr)
+int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
+                       struct kib_rdma_desc *rd, __u32 nob, __u64 iov,
+                       struct kib_fmr *fmr)
 {
        __u64 *pages = tx->tx_pages;
        bool is_rx = (rd != tx->tx_rd);
         bool tx_pages_mapped = 0;
-       kib_fmr_pool_t *fpo;
+       struct kib_fmr_pool *fpo;
        int npages = 0;
        __u64 version;
        int rc;
@@ -1780,7 +1776,7 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
        goto again;
 }
 
-static void kiblnd_fini_pool(kib_pool_t *pool)
+static void kiblnd_fini_pool(struct kib_pool *pool)
 {
        LASSERT(list_empty(&pool->po_free_list));
        LASSERT(!pool->po_allocated);
@@ -1788,7 +1784,7 @@ static void kiblnd_fini_pool(kib_pool_t *pool)
        CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
 }
 
-static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
+static void kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size)
 {
        CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
 
@@ -1801,10 +1797,10 @@ static void kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
 
 static void kiblnd_destroy_pool_list(struct list_head *head)
 {
-       kib_pool_t *pool;
+       struct kib_pool *pool;
 
        while (!list_empty(head)) {
-               pool = list_entry(head->next, kib_pool_t, po_list);
+               pool = list_entry(head->next, struct kib_pool, po_list);
                list_del(&pool->po_list);
 
                LASSERT(pool->po_owner);
@@ -1812,15 +1808,15 @@ static void kiblnd_destroy_pool_list(struct list_head *head)
        }
 }
 
-static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
+static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
 {
        if (!ps->ps_net) /* intialized? */
                return;
 
        spin_lock(&ps->ps_lock);
        while (!list_empty(&ps->ps_pool_list)) {
-               kib_pool_t *po = list_entry(ps->ps_pool_list.next,
-                                           kib_pool_t, po_list);
+               struct kib_pool *po = list_entry(ps->ps_pool_list.next,
+                                           struct kib_pool, po_list);
                po->po_failed = 1;
                list_del(&po->po_list);
                if (!po->po_allocated)
@@ -1831,7 +1827,7 @@ static void kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
        spin_unlock(&ps->ps_lock);
 }
 
-static void kiblnd_fini_poolset(kib_poolset_t *ps)
+static void kiblnd_fini_poolset(struct kib_poolset *ps)
 {
        if (ps->ps_net) { /* initialized? */
                kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
@@ -1839,14 +1835,14 @@ static void kiblnd_fini_poolset(kib_poolset_t *ps)
        }
 }
 
-static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
-                              kib_net_t *net, char *name, int size,
+static int kiblnd_init_poolset(struct kib_poolset *ps, int cpt,
+                              struct kib_net *net, char *name, int size,
                               kib_ps_pool_create_t po_create,
                               kib_ps_pool_destroy_t po_destroy,
                               kib_ps_node_init_t nd_init,
                               kib_ps_node_fini_t nd_fini)
 {
-       kib_pool_t *pool;
+       struct kib_pool *pool;
        int rc;
 
        memset(ps, 0, sizeof(*ps));
@@ -1874,7 +1870,7 @@ static int kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
        return rc;
 }
 
-static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
+static int kiblnd_pool_is_idle(struct kib_pool *pool, unsigned long now)
 {
        if (pool->po_allocated) /* still in use */
                return 0;
@@ -1883,11 +1879,11 @@ static int kiblnd_pool_is_idle(kib_pool_t *pool, unsigned long now)
        return cfs_time_aftereq(now, pool->po_deadline);
 }
 
-void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
+void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
 {
        LIST_HEAD(zombies);
-       kib_poolset_t *ps = pool->po_owner;
-       kib_pool_t *tmp;
+       struct kib_poolset *ps = pool->po_owner;
+       struct kib_pool *tmp;
        unsigned long now = cfs_time_current();
 
        spin_lock(&ps->ps_lock);
@@ -1913,10 +1909,10 @@ void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
                kiblnd_destroy_pool_list(&zombies);
 }
 
-struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
+struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps)
 {
        struct list_head *node;
-       kib_pool_t *pool;
+       struct kib_pool *pool;
        unsigned int interval = 1;
        unsigned long time_before;
        unsigned int trips = 0;
@@ -1986,9 +1982,9 @@ struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps)
        goto again;
 }
 
-static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
+static void kiblnd_destroy_tx_pool(struct kib_pool *pool)
 {
-       kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
+       struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool, tpo_pool);
        int i;
 
        LASSERT(!pool->po_allocated);
@@ -2002,7 +1998,7 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
                goto out;
 
        for (i = 0; i < pool->po_size; i++) {
-               kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+               struct kib_tx *tx = &tpo->tpo_tx_descs[i];
 
                list_del(&tx->tx_list);
                if (tx->tx_pages)
@@ -2011,8 +2007,8 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
                                    sizeof(*tx->tx_pages));
                if (tx->tx_frags)
                        LIBCFS_FREE(tx->tx_frags,
-                                   IBLND_MAX_RDMA_FRAGS *
-                                           sizeof(*tx->tx_frags));
+                                   (1 + IBLND_MAX_RDMA_FRAGS) *
+                                    sizeof(*tx->tx_frags));
                if (tx->tx_wrq)
                        LIBCFS_FREE(tx->tx_wrq,
                                    (1 + IBLND_MAX_RDMA_FRAGS) *
@@ -2023,12 +2019,12 @@ static void kiblnd_destroy_tx_pool(kib_pool_t *pool)
                                    sizeof(*tx->tx_sge));
                if (tx->tx_rd)
                        LIBCFS_FREE(tx->tx_rd,
-                                   offsetof(kib_rdma_desc_t,
+                                   offsetof(struct kib_rdma_desc,
                                             rd_frags[IBLND_MAX_RDMA_FRAGS]));
        }
 
        LIBCFS_FREE(tpo->tpo_tx_descs,
-                   pool->po_size * sizeof(kib_tx_t));
+                   pool->po_size * sizeof(struct kib_tx));
 out:
        kiblnd_fini_pool(pool);
        LIBCFS_FREE(tpo, sizeof(*tpo));
@@ -2041,13 +2037,13 @@ static int kiblnd_tx_pool_size(int ncpts)
        return max(IBLND_TX_POOL, ntx);
 }
 
-static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
-                                kib_pool_t **pp_po)
+static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
+                                struct kib_pool **pp_po)
 {
        int i;
        int npg;
-       kib_pool_t *pool;
-       kib_tx_pool_t *tpo;
+       struct kib_pool *pool;
+       struct kib_tx_pool *tpo;
 
        LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
        if (!tpo) {
@@ -2068,17 +2064,17 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
        }
 
        LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
-                        size * sizeof(kib_tx_t));
+                        size * sizeof(struct kib_tx));
        if (!tpo->tpo_tx_descs) {
                CERROR("Can't allocate %d tx descriptors\n", size);
                ps->ps_pool_destroy(pool);
                return -ENOMEM;
        }
 
-       memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
+       memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx));
 
        for (i = 0; i < size; i++) {
-               kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+               struct kib_tx *tx = &tpo->tpo_tx_descs[i];
 
                tx->tx_pool = tpo;
                if (ps->ps_net->ibn_fmr_ps) {
@@ -2090,11 +2086,12 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
                }
 
                LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
-                                IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
+                                (1 + IBLND_MAX_RDMA_FRAGS) *
+                                sizeof(*tx->tx_frags));
                if (!tx->tx_frags)
                        break;
 
-               sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
+               sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
 
                LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
                                 (1 + IBLND_MAX_RDMA_FRAGS) *
@@ -2109,7 +2106,7 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
                        break;
 
                LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
-                                offsetof(kib_rdma_desc_t,
+                                offsetof(struct kib_rdma_desc,
                                          rd_frags[IBLND_MAX_RDMA_FRAGS]));
                if (!tx->tx_rd)
                        break;
@@ -2125,22 +2122,23 @@ static int kiblnd_create_tx_pool(kib_poolset_t *ps, int size,
        return -ENOMEM;
 }
 
-static void kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
+static void kiblnd_tx_init(struct kib_pool *pool, struct list_head *node)
 {
-       kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
-                                            tps_poolset);
-       kib_tx_t *tx  = list_entry(node, kib_tx_t, tx_list);
+       struct kib_tx_poolset *tps = container_of(pool->po_owner,
+                                                 struct kib_tx_poolset,
+                                                 tps_poolset);
+       struct kib_tx *tx = list_entry(node, struct kib_tx, tx_list);
 
        tx->tx_cookie = tps->tps_next_tx_cookie++;
 }
 
-static void kiblnd_net_fini_pools(kib_net_t *net)
+static void kiblnd_net_fini_pools(struct kib_net *net)
 {
        int i;
 
        cfs_cpt_for_each(i, lnet_cpt_table()) {
-               kib_tx_poolset_t *tps;
-               kib_fmr_poolset_t *fps;
+               struct kib_tx_poolset *tps;
+               struct kib_fmr_poolset *fps;
 
                if (net->ibn_tx_ps) {
                        tps = net->ibn_tx_ps[i];
@@ -2164,7 +2162,7 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
        }
 }
 
-static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
+static int kiblnd_net_init_pools(struct kib_net *net, lnet_ni_t *ni, __u32 *cpts,
                                 int ncpts)
 {
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;
@@ -2206,7 +2204,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
         * number of CPTs that exist, i.e net->ibn_fmr_ps[cpt].
         */
        net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
-                                          sizeof(kib_fmr_poolset_t));
+                                          sizeof(struct kib_fmr_poolset));
        if (!net->ibn_fmr_ps) {
                CERROR("Failed to allocate FMR pool array\n");
                rc = -ENOMEM;
@@ -2234,7 +2232,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
         * number of CPTs that exist, i.e net->ibn_tx_ps[cpt].
         */
        net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
-                                         sizeof(kib_tx_poolset_t));
+                                         sizeof(struct kib_tx_poolset));
        if (!net->ibn_tx_ps) {
                CERROR("Failed to allocate tx pool array\n");
                rc = -ENOMEM;
@@ -2263,7 +2261,7 @@ static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
        return rc;
 }
 
-static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
+static int kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
 {
        /*
         * It's safe to assume a HCA can handle a page size
@@ -2283,7 +2281,7 @@ static int kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
        return -EINVAL;
 }
 
-static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
+static void kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev)
 {
        if (!hdev->ibh_mrs)
                return;
@@ -2293,7 +2291,7 @@ static void kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
        hdev->ibh_mrs = NULL;
 }
 
-void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
+void kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
 {
        kiblnd_hdev_cleanup_mrs(hdev);
 
@@ -2306,7 +2304,7 @@ void kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
        LIBCFS_FREE(hdev, sizeof(*hdev));
 }
 
-static int kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
+static int kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev)
 {
        struct ib_mr *mr;
        int rc;
@@ -2335,7 +2333,7 @@ static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
        return 0;
 }
 
-static int kiblnd_dev_need_failover(kib_dev_t *dev)
+static int kiblnd_dev_need_failover(struct kib_dev *dev)
 {
        struct rdma_cm_id *cmid;
        struct sockaddr_in srcaddr;
@@ -2389,15 +2387,15 @@ static int kiblnd_dev_need_failover(kib_dev_t *dev)
        return rc;
 }
 
-int kiblnd_dev_failover(kib_dev_t *dev)
+int kiblnd_dev_failover(struct kib_dev *dev)
 {
        LIST_HEAD(zombie_tpo);
        LIST_HEAD(zombie_ppo);
        LIST_HEAD(zombie_fpo);
        struct rdma_cm_id *cmid  = NULL;
-       kib_hca_dev_t *hdev  = NULL;
+       struct kib_hca_dev *hdev  = NULL;
        struct ib_pd *pd;
-       kib_net_t *net;
+       struct kib_net *net;
        struct sockaddr_in addr;
        unsigned long flags;
        int rc = 0;
@@ -2522,7 +2520,7 @@ int kiblnd_dev_failover(kib_dev_t *dev)
        return rc;
 }
 
-void kiblnd_destroy_dev(kib_dev_t *dev)
+void kiblnd_destroy_dev(struct kib_dev *dev)
 {
        LASSERT(!dev->ibd_nnets);
        LASSERT(list_empty(&dev->ibd_nets));
@@ -2536,10 +2534,10 @@ void kiblnd_destroy_dev(kib_dev_t *dev)
        LIBCFS_FREE(dev, sizeof(*dev));
 }
 
-static kib_dev_t *kiblnd_create_dev(char *ifname)
+static struct kib_dev *kiblnd_create_dev(char *ifname)
 {
        struct net_device *netdev;
-       kib_dev_t *dev;
+       struct kib_dev *dev;
        __u32 netmask;
        __u32 ip;
        int up;
@@ -2654,7 +2652,7 @@ static void kiblnd_base_shutdown(void)
 
 static void kiblnd_shutdown(lnet_ni_t *ni)
 {
-       kib_net_t *net = ni->ni_data;
+       struct kib_net *net = ni->ni_data;
        rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
        int i;
        unsigned long flags;
@@ -2851,7 +2849,7 @@ static int kiblnd_start_schedulers(struct kib_sched_info *sched)
        return rc;
 }
 
-static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
+static int kiblnd_dev_start_threads(struct kib_dev *dev, int newdev, __u32 *cpts,
                                    int ncpts)
 {
        int cpt;
@@ -2877,10 +2875,10 @@ static int kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts,
        return 0;
 }
 
-static kib_dev_t *kiblnd_dev_search(char *ifname)
+static struct kib_dev *kiblnd_dev_search(char *ifname)
 {
-       kib_dev_t *alias = NULL;
-       kib_dev_t *dev;
+       struct kib_dev *alias = NULL;
+       struct kib_dev *dev;
        char *colon;
        char *colon2;
 
@@ -2912,8 +2910,8 @@ static kib_dev_t *kiblnd_dev_search(char *ifname)
 static int kiblnd_startup(lnet_ni_t *ni)
 {
        char *ifname;
-       kib_dev_t *ibdev = NULL;
-       kib_net_t *net;
+       struct kib_dev *ibdev = NULL;
+       struct kib_net *net;
        struct timespec64 tv;
        unsigned long flags;
        int rc;
@@ -3020,11 +3018,11 @@ static void __exit ko2iblnd_exit(void)
 
 static int __init ko2iblnd_init(void)
 {
-       CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
-       CLASSERT(offsetof(kib_msg_t,
+       CLASSERT(sizeof(struct kib_msg) <= IBLND_MSG_SIZE);
+       CLASSERT(offsetof(struct kib_msg,
                          ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
                          <= IBLND_MSG_SIZE);
-       CLASSERT(offsetof(kib_msg_t,
+       CLASSERT(offsetof(struct kib_msg,
                          ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
                          <= IBLND_MSG_SIZE);
 
index b22984fd9ad32a24866e69186f0cb7460f713b01..078a0c3e88455b119607eddc4188e990cc9960db 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 #define IBLND_N_SCHED                  2
 #define IBLND_N_SCHED_HIGH             4
 
-typedef struct {
+struct kib_tunables {
        int *kib_dev_failover;           /* HCA failover */
        unsigned int *kib_service;       /* IB service number */
        int *kib_min_reconnect_interval; /* first failed connection retry... */
        int *kib_max_reconnect_interval; /* exponentially increasing to this */
-       int *kib_cksum;                  /* checksum kib_msg_t? */
+       int *kib_cksum;                  /* checksum struct kib_msg? */
        int *kib_timeout;                /* comms timeout (seconds) */
        int *kib_keepalive;              /* keepalive timeout (seconds) */
        int *kib_ntx;                    /* # tx descs */
@@ -94,22 +90,22 @@ typedef struct {
        int *kib_require_priv_port;      /* accept only privileged ports */
        int *kib_use_priv_port; /* use privileged port for active connect */
        int *kib_nscheds;                /* # threads on each CPT */
-} kib_tunables_t;
+};
 
-extern kib_tunables_t  kiblnd_tunables;
+extern struct kib_tunables  kiblnd_tunables;
 
 #define IBLND_MSG_QUEUE_SIZE_V1   8 /* V1 only : # messages/RDMAs in-flight */
 #define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */
 
 #define IBLND_CREDITS_DEFAULT     8 /* default # of peer credits */
-#define IBLND_CREDITS_MAX        ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1)  /* Max # of peer credits */
+#define IBLND_CREDITS_MAX        ((typeof(((struct kib_msg *)0)->ibm_credits)) - 1)  /* Max # of peer credits */
 
 /* when eagerly to return credits */
 #define IBLND_CREDITS_HIGHWATER(t, v)  ((v) == IBLND_MSG_VERSION_1 ? \
                                        IBLND_CREDIT_HIGHWATER_V1 : \
                                        t->lnd_peercredits_hiw)
 
-#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(&init_net, \
+#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(current->nsproxy->net_ns, \
                                                               cb, dev, \
                                                               ps, qpt)
 
@@ -150,7 +146,7 @@ struct kib_hca_dev;
 #define KIB_IFNAME_SIZE              256
 #endif
 
-typedef struct {
+struct kib_dev {
        struct list_head   ibd_list;            /* chain on kib_devs */
        struct list_head   ibd_fail_list;       /* chain on kib_failed_devs */
        __u32              ibd_ifip;            /* IPoIB interface IP */
@@ -165,9 +161,9 @@ typedef struct {
        unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
        struct list_head   ibd_nets;
        struct kib_hca_dev *ibd_hdev;
-} kib_dev_t;
+};
 
-typedef struct kib_hca_dev {
+struct kib_hca_dev {
        struct rdma_cm_id  *ibh_cmid;           /* listener cmid */
        struct ib_device   *ibh_ibdev;          /* IB device */
        int                ibh_page_shift;      /* page shift of current HCA */
@@ -177,19 +173,19 @@ typedef struct kib_hca_dev {
        __u64              ibh_mr_size;         /* size of MR */
        struct ib_mr       *ibh_mrs;            /* global MR */
        struct ib_pd       *ibh_pd;             /* PD */
-       kib_dev_t          *ibh_dev;            /* owner */
+       struct kib_dev     *ibh_dev;            /* owner */
        atomic_t           ibh_ref;             /* refcount */
-} kib_hca_dev_t;
+};
 
 /** # of seconds to keep pool alive */
 #define IBLND_POOL_DEADLINE     300
 /** # of seconds to retry if allocation failed */
 #define IBLND_POOL_RETRY       1
 
-typedef struct {
+struct kib_pages {
        int                ibp_npages;          /* # pages */
        struct page        *ibp_pages[0];       /* page array */
-} kib_pages_t;
+};
 
 struct kib_pool;
 struct kib_poolset;
@@ -204,7 +200,7 @@ struct kib_net;
 
 #define IBLND_POOL_NAME_LEN     32
 
-typedef struct kib_poolset {
+struct kib_poolset {
        spinlock_t            ps_lock;            /* serialize */
        struct kib_net        *ps_net;            /* network it belongs to */
        char                  ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
@@ -220,31 +216,31 @@ typedef struct kib_poolset {
        kib_ps_pool_destroy_t ps_pool_destroy;    /* destroy a pool */
        kib_ps_node_init_t    ps_node_init; /* initialize new allocated node */
        kib_ps_node_fini_t    ps_node_fini;       /* finalize node */
-} kib_poolset_t;
+};
 
-typedef struct kib_pool {
+struct kib_pool {
        struct list_head      po_list;       /* chain on pool list */
        struct list_head      po_free_list;  /* pre-allocated node */
-       kib_poolset_t         *po_owner;     /* pool_set of this pool */
+       struct kib_poolset      *po_owner;      /* pool_set of this pool */
        unsigned long         po_deadline;   /* deadline of this pool */
        int                   po_allocated;  /* # of elements in use */
        int                   po_failed;     /* pool is created on failed HCA */
        int                   po_size;       /* # of pre-allocated elements */
-} kib_pool_t;
+};
 
-typedef struct {
-       kib_poolset_t         tps_poolset;        /* pool-set */
+struct kib_tx_poolset {
+       struct kib_poolset      tps_poolset;            /* pool-set */
        __u64                 tps_next_tx_cookie; /* cookie of TX */
-} kib_tx_poolset_t;
+};
 
-typedef struct {
-       kib_pool_t            tpo_pool;           /* pool */
-       struct kib_hca_dev    *tpo_hdev;          /* device for this pool */
-       struct kib_tx         *tpo_tx_descs;      /* all the tx descriptors */
-       kib_pages_t           *tpo_tx_pages;      /* premapped tx msg pages */
-} kib_tx_pool_t;
+struct kib_tx_pool {
+       struct kib_pool          tpo_pool;      /* pool */
+       struct kib_hca_dev      *tpo_hdev;      /* device for this pool */
+       struct kib_tx           *tpo_tx_descs;  /* all the tx descriptors */
+       struct kib_pages        *tpo_tx_pages;  /* premapped tx msg pages */
+};
 
-typedef struct {
+struct kib_fmr_poolset {
        spinlock_t            fps_lock;            /* serialize */
        struct kib_net        *fps_net;            /* IB network */
        struct list_head      fps_pool_list;       /* FMR pool list */
@@ -257,7 +253,7 @@ typedef struct {
        int                   fps_increasing;      /* is allocating new pool */
        unsigned long         fps_next_retry;      /* time stamp for retry if*/
                                                   /* failed to allocate */
-} kib_fmr_poolset_t;
+};
 
 struct kib_fast_reg_descriptor { /* For fast registration */
        struct list_head                 frd_list;
@@ -267,10 +263,10 @@ struct kib_fast_reg_descriptor { /* For fast registration */
        bool                             frd_valid;
 };
 
-typedef struct {
-       struct list_head      fpo_list;            /* chain on pool list */
-       struct kib_hca_dev    *fpo_hdev;           /* device for this pool */
-       kib_fmr_poolset_t     *fpo_owner;          /* owner of this pool */
+struct kib_fmr_pool {
+       struct list_head         fpo_list;      /* chain on pool list */
+       struct kib_hca_dev      *fpo_hdev;      /* device for this pool */
+       struct kib_fmr_poolset  *fpo_owner;     /* owner of this pool */
        union {
                struct {
                        struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
@@ -284,17 +280,17 @@ typedef struct {
        int                   fpo_failed;          /* fmr pool is failed */
        int                   fpo_map_count;       /* # of mapped FMR */
        int                   fpo_is_fmr;
-} kib_fmr_pool_t;
+};
 
-typedef struct {
-       kib_fmr_pool_t                  *fmr_pool;      /* pool of FMR */
+struct kib_fmr {
+       struct kib_fmr_pool             *fmr_pool;      /* pool of FMR */
        struct ib_pool_fmr              *fmr_pfmr;      /* IB pool fmr */
        struct kib_fast_reg_descriptor  *fmr_frd;
        u32                              fmr_key;
-} kib_fmr_t;
+};
 
-typedef struct kib_net {
-       struct list_head      ibn_list;       /* chain on kib_dev_t::ibd_nets */
+struct kib_net {
+       struct list_head      ibn_list;       /* chain on struct kib_dev::ibd_nets */
        __u64                 ibn_incarnation;/* my epoch */
        int                   ibn_init;       /* initialisation state */
        int                   ibn_shutdown;   /* shutting down? */
@@ -302,11 +298,11 @@ typedef struct kib_net {
        atomic_t              ibn_npeers;     /* # peers extant */
        atomic_t              ibn_nconns;     /* # connections extant */
 
-       kib_tx_poolset_t      **ibn_tx_ps;    /* tx pool-set */
-       kib_fmr_poolset_t     **ibn_fmr_ps;   /* fmr pool-set */
+       struct kib_tx_poolset   **ibn_tx_ps;    /* tx pool-set */
+       struct kib_fmr_poolset  **ibn_fmr_ps;   /* fmr pool-set */
 
-       kib_dev_t             *ibn_dev;       /* underlying IB device */
-} kib_net_t;
+       struct kib_dev          *ibn_dev;       /* underlying IB device */
+};
 
 #define KIB_THREAD_SHIFT               16
 #define KIB_THREAD_ID(cpt, tid)                ((cpt) << KIB_THREAD_SHIFT | (tid))
@@ -322,7 +318,7 @@ struct kib_sched_info {
        int                ibs_cpt;      /* CPT id */
 };
 
-typedef struct {
+struct kib_data {
        int               kib_init;           /* initialisation state */
        int               kib_shutdown;       /* shut down? */
        struct list_head  kib_devs;           /* IB devices extant */
@@ -349,7 +345,7 @@ typedef struct {
        spinlock_t kib_connd_lock;          /* serialise */
        struct ib_qp_attr kib_error_qpa;    /* QP->ERROR */
        struct kib_sched_info **kib_scheds; /* percpt data for schedulers */
-} kib_data_t;
+};
 
 #define IBLND_INIT_NOTHING 0
 #define IBLND_INIT_DATA    1
@@ -360,51 +356,51 @@ typedef struct {
  * These are sent in sender's byte order (i.e. receiver flips).
  */
 
-typedef struct kib_connparams {
+struct kib_connparams {
        __u16        ibcp_queue_depth;
        __u16        ibcp_max_frags;
        __u32        ibcp_max_msg_size;
-} WIRE_ATTR kib_connparams_t;
+} WIRE_ATTR;
 
-typedef struct {
+struct kib_immediate_msg {
        lnet_hdr_t   ibim_hdr;        /* portals header */
        char         ibim_payload[0]; /* piggy-backed payload */
-} WIRE_ATTR kib_immediate_msg_t;
+} WIRE_ATTR;
 
-typedef struct {
+struct kib_rdma_frag {
        __u32        rf_nob;          /* # bytes this frag */
        __u64        rf_addr;         /* CAVEAT EMPTOR: misaligned!! */
-} WIRE_ATTR kib_rdma_frag_t;
+} WIRE_ATTR;
 
-typedef struct {
+struct kib_rdma_desc {
        __u32           rd_key;       /* local/remote key */
        __u32           rd_nfrags;    /* # fragments */
-       kib_rdma_frag_t rd_frags[0];  /* buffer frags */
-} WIRE_ATTR kib_rdma_desc_t;
+       struct kib_rdma_frag    rd_frags[0];    /* buffer frags */
+} WIRE_ATTR;
 
-typedef struct {
+struct kib_putreq_msg {
        lnet_hdr_t      ibprm_hdr;    /* portals header */
        __u64           ibprm_cookie; /* opaque completion cookie */
-} WIRE_ATTR kib_putreq_msg_t;
+} WIRE_ATTR;
 
-typedef struct {
+struct kib_putack_msg {
        __u64           ibpam_src_cookie; /* reflected completion cookie */
        __u64           ibpam_dst_cookie; /* opaque completion cookie */
-       kib_rdma_desc_t ibpam_rd;         /* sender's sink buffer */
-} WIRE_ATTR kib_putack_msg_t;
+       struct kib_rdma_desc ibpam_rd;         /* sender's sink buffer */
+} WIRE_ATTR;
 
-typedef struct {
+struct kib_get_msg {
        lnet_hdr_t      ibgm_hdr;     /* portals header */
        __u64           ibgm_cookie;  /* opaque completion cookie */
-       kib_rdma_desc_t ibgm_rd;      /* rdma descriptor */
-} WIRE_ATTR kib_get_msg_t;
+       struct kib_rdma_desc ibgm_rd;      /* rdma descriptor */
+} WIRE_ATTR;
 
-typedef struct {
+struct kib_completion_msg {
        __u64           ibcm_cookie;  /* opaque completion cookie */
        __s32           ibcm_status;  /* < 0 failure: >= 0 length */
-} WIRE_ATTR kib_completion_msg_t;
+} WIRE_ATTR;
 
-typedef struct {
+struct kib_msg {
        /* First 2 fields fixed FOR ALL TIME */
        __u32           ibm_magic;    /* I'm an ibnal message */
        __u16           ibm_version;  /* this is my version number */
@@ -419,14 +415,14 @@ typedef struct {
        __u64           ibm_dststamp; /* destination's incarnation */
 
        union {
-               kib_connparams_t     connparams;
-               kib_immediate_msg_t  immediate;
-               kib_putreq_msg_t     putreq;
-               kib_putack_msg_t     putack;
-               kib_get_msg_t        get;
-               kib_completion_msg_t completion;
+               struct kib_connparams           connparams;
+               struct kib_immediate_msg        immediate;
+               struct kib_putreq_msg           putreq;
+               struct kib_putack_msg           putack;
+               struct kib_get_msg              get;
+               struct kib_completion_msg       completion;
        } WIRE_ATTR ibm_u;
-} WIRE_ATTR kib_msg_t;
+} WIRE_ATTR;
 
 #define IBLND_MSG_MAGIC     LNET_PROTO_IB_MAGIC /* unique magic */
 
@@ -445,14 +441,14 @@ typedef struct {
 #define IBLND_MSG_GET_REQ   0xd6       /* getreq (sink->src) */
 #define IBLND_MSG_GET_DONE  0xd7       /* completion (src->sink: all OK) */
 
-typedef struct {
+struct kib_rej {
        __u32            ibr_magic;       /* sender's magic */
        __u16            ibr_version;     /* sender's version */
        __u8             ibr_why;         /* reject reason */
        __u8             ibr_padding;     /* padding */
        __u64            ibr_incarnation; /* incarnation of peer */
-       kib_connparams_t ibr_cp;          /* connection parameters */
-} WIRE_ATTR kib_rej_t;
+       struct kib_connparams ibr_cp;          /* connection parameters */
+} WIRE_ATTR;
 
 /* connection rejection reasons */
 #define IBLND_REJECT_CONN_RACE      1 /* You lost connection race */
@@ -467,28 +463,26 @@ typedef struct {
 
 /***********************************************************************/
 
-typedef struct kib_rx                         /* receive message */
-{
+struct kib_rx {                                        /* receive message */
        struct list_head       rx_list;       /* queue for attention */
        struct kib_conn        *rx_conn;      /* owning conn */
        int                    rx_nob; /* # bytes received (-1 while posted) */
        enum ib_wc_status      rx_status;     /* completion status */
-       kib_msg_t              *rx_msg;       /* message buffer (host vaddr) */
+       struct kib_msg          *rx_msg;        /* message buffer (host vaddr) */
        __u64                  rx_msgaddr;    /* message buffer (I/O addr) */
        DECLARE_PCI_UNMAP_ADDR(rx_msgunmap);  /* for dma_unmap_single() */
        struct ib_recv_wr      rx_wrq;        /* receive work item... */
        struct ib_sge          rx_sge;        /* ...and its memory */
-} kib_rx_t;
+};
 
 #define IBLND_POSTRX_DONT_POST    0 /* don't post */
 #define IBLND_POSTRX_NO_CREDIT    1 /* post: no credits */
 #define IBLND_POSTRX_PEER_CREDIT  2 /* post: give peer back 1 credit */
 #define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give self back 1 reserved credit */
 
-typedef struct kib_tx                         /* transmit message */
-{
+struct kib_tx {                                        /* transmit message */
        struct list_head      tx_list; /* queue on idle_txs ibc_tx_queue etc. */
-       kib_tx_pool_t         *tx_pool;       /* pool I'm from */
+       struct kib_tx_pool      *tx_pool;       /* pool I'm from */
        struct kib_conn       *tx_conn;       /* owning conn */
        short                 tx_sending;     /* # tx callbacks outstanding */
        short                 tx_queued;      /* queued for sending */
@@ -497,28 +491,28 @@ typedef struct kib_tx                         /* transmit message */
        unsigned long         tx_deadline;    /* completion deadline */
        __u64                 tx_cookie;      /* completion cookie */
        lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
-       kib_msg_t             *tx_msg;        /* message buffer (host vaddr) */
+       struct kib_msg        *tx_msg;        /* message buffer (host vaddr) */
        __u64                 tx_msgaddr;     /* message buffer (I/O addr) */
        DECLARE_PCI_UNMAP_ADDR(tx_msgunmap);  /* for dma_unmap_single() */
        int                   tx_nwrq;        /* # send work items */
        struct ib_rdma_wr     *tx_wrq;        /* send work items... */
        struct ib_sge         *tx_sge;        /* ...and their memory */
-       kib_rdma_desc_t       *tx_rd;         /* rdma descriptor */
+       struct kib_rdma_desc  *tx_rd;         /* rdma descriptor */
        int                   tx_nfrags;      /* # entries in... */
        struct scatterlist    *tx_frags;      /* dma_map_sg descriptor */
        __u64                 *tx_pages;      /* rdma phys page addrs */
-       kib_fmr_t             fmr;            /* FMR */
+       struct kib_fmr        fmr;            /* FMR */
        int                   tx_dmadir;      /* dma direction */
-} kib_tx_t;
+};
 
-typedef struct kib_connvars {
-       kib_msg_t cv_msg; /* connection-in-progress variables */
-} kib_connvars_t;
+struct kib_connvars {
+       struct kib_msg cv_msg; /* connection-in-progress variables */
+};
 
-typedef struct kib_conn {
+struct kib_conn {
        struct kib_sched_info *ibc_sched;      /* scheduler information */
        struct kib_peer       *ibc_peer;       /* owning peer */
-       kib_hca_dev_t         *ibc_hdev;       /* HCA bound on */
+       struct kib_hca_dev         *ibc_hdev;       /* HCA bound on */
        struct list_head ibc_list;             /* stash on peer's conn list */
        struct list_head      ibc_sched_list;  /* schedule for attention */
        __u16                 ibc_version;     /* version of connection */
@@ -553,14 +547,14 @@ typedef struct kib_conn {
                                               /* reserve an ACK/DONE msg */
        struct list_head ibc_active_txs; /* active tx awaiting completion */
        spinlock_t            ibc_lock;        /* serialise */
-       kib_rx_t              *ibc_rxs;        /* the rx descs */
-       kib_pages_t           *ibc_rx_pages;   /* premapped rx msg pages */
+       struct kib_rx              *ibc_rxs;        /* the rx descs */
+       struct kib_pages           *ibc_rx_pages;   /* premapped rx msg pages */
 
        struct rdma_cm_id     *ibc_cmid;       /* CM id */
        struct ib_cq          *ibc_cq;         /* completion queue */
 
-       kib_connvars_t        *ibc_connvars; /* in-progress connection state */
-} kib_conn_t;
+       struct kib_connvars     *ibc_connvars;  /* in-progress connection state */
+};
 
 #define IBLND_CONN_INIT           0     /* being initialised */
 #define IBLND_CONN_ACTIVE_CONNECT 1     /* active sending req */
@@ -569,7 +563,7 @@ typedef struct kib_conn {
 #define IBLND_CONN_CLOSING        4     /* being closed */
 #define IBLND_CONN_DISCONNECTED   5     /* disconnected */
 
-typedef struct kib_peer {
+struct kib_peer {
        struct list_head ibp_list;        /* stash on global peer list */
        lnet_nid_t       ibp_nid;         /* who's on the other end(s) */
        lnet_ni_t        *ibp_ni;         /* LNet interface */
@@ -596,11 +590,11 @@ typedef struct kib_peer {
        __u16            ibp_max_frags;
        /* max_peer_credits */
        __u16            ibp_queue_depth;
-} kib_peer_t;
+};
 
-extern kib_data_t kiblnd_data;
+extern struct kib_data kiblnd_data;
 
-void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+void kiblnd_hdev_destroy(struct kib_hca_dev *hdev);
 
 int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
 
@@ -645,14 +639,14 @@ kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
 }
 
 static inline void
-kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
+kiblnd_hdev_addref_locked(struct kib_hca_dev *hdev)
 {
        LASSERT(atomic_read(&hdev->ibh_ref) > 0);
        atomic_inc(&hdev->ibh_ref);
 }
 
 static inline void
-kiblnd_hdev_decref(kib_hca_dev_t *hdev)
+kiblnd_hdev_decref(struct kib_hca_dev *hdev)
 {
        LASSERT(atomic_read(&hdev->ibh_ref) > 0);
        if (atomic_dec_and_test(&hdev->ibh_ref))
@@ -660,7 +654,7 @@ kiblnd_hdev_decref(kib_hca_dev_t *hdev)
 }
 
 static inline int
-kiblnd_dev_can_failover(kib_dev_t *dev)
+kiblnd_dev_can_failover(struct kib_dev *dev)
 {
        if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
                return 0;
@@ -716,7 +710,7 @@ do {                                                            \
 } while (0)
 
 static inline bool
-kiblnd_peer_connecting(kib_peer_t *peer)
+kiblnd_peer_connecting(struct kib_peer *peer)
 {
        return peer->ibp_connecting ||
               peer->ibp_reconnecting ||
@@ -724,7 +718,7 @@ kiblnd_peer_connecting(kib_peer_t *peer)
 }
 
 static inline bool
-kiblnd_peer_idle(kib_peer_t *peer)
+kiblnd_peer_idle(struct kib_peer *peer)
 {
        return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns);
 }
@@ -739,23 +733,23 @@ kiblnd_nid2peerlist(lnet_nid_t nid)
 }
 
 static inline int
-kiblnd_peer_active(kib_peer_t *peer)
+kiblnd_peer_active(struct kib_peer *peer)
 {
        /* Am I in the peer hash table? */
        return !list_empty(&peer->ibp_list);
 }
 
-static inline kib_conn_t *
-kiblnd_get_conn_locked(kib_peer_t *peer)
+static inline struct kib_conn *
+kiblnd_get_conn_locked(struct kib_peer *peer)
 {
        LASSERT(!list_empty(&peer->ibp_conns));
 
        /* just return the first connection */
-       return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
+       return list_entry(peer->ibp_conns.next, struct kib_conn, ibc_list);
 }
 
 static inline int
-kiblnd_send_keepalive(kib_conn_t *conn)
+kiblnd_send_keepalive(struct kib_conn *conn)
 {
        return (*kiblnd_tunables.kib_keepalive > 0) &&
                cfs_time_after(jiffies, conn->ibc_last_send +
@@ -764,7 +758,7 @@ kiblnd_send_keepalive(kib_conn_t *conn)
 }
 
 static inline int
-kiblnd_need_noop(kib_conn_t *conn)
+kiblnd_need_noop(struct kib_conn *conn)
 {
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
@@ -800,14 +794,14 @@ kiblnd_need_noop(kib_conn_t *conn)
 }
 
 static inline void
-kiblnd_abort_receives(kib_conn_t *conn)
+kiblnd_abort_receives(struct kib_conn *conn)
 {
        ib_modify_qp(conn->ibc_cmid->qp,
                     &kiblnd_data.kib_error_qpa, IB_QP_STATE);
 }
 
 static inline const char *
-kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
+kiblnd_queue2str(struct kib_conn *conn, struct list_head *q)
 {
        if (q == &conn->ibc_tx_queue)
                return "tx_queue";
@@ -858,21 +852,21 @@ kiblnd_wreqid2type(__u64 wreqid)
 }
 
 static inline void
-kiblnd_set_conn_state(kib_conn_t *conn, int state)
+kiblnd_set_conn_state(struct kib_conn *conn, int state)
 {
        conn->ibc_state = state;
        mb();
 }
 
 static inline void
-kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
+kiblnd_init_msg(struct kib_msg *msg, int type, int body_nob)
 {
        msg->ibm_type = type;
-       msg->ibm_nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
+       msg->ibm_nob  = offsetof(struct kib_msg, ibm_u) + body_nob;
 }
 
 static inline int
-kiblnd_rd_size(kib_rdma_desc_t *rd)
+kiblnd_rd_size(struct kib_rdma_desc *rd)
 {
        int   i;
        int   size;
@@ -884,25 +878,25 @@ kiblnd_rd_size(kib_rdma_desc_t *rd)
 }
 
 static inline __u64
-kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
+kiblnd_rd_frag_addr(struct kib_rdma_desc *rd, int index)
 {
        return rd->rd_frags[index].rf_addr;
 }
 
 static inline __u32
-kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
+kiblnd_rd_frag_size(struct kib_rdma_desc *rd, int index)
 {
        return rd->rd_frags[index].rf_nob;
 }
 
 static inline __u32
-kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
+kiblnd_rd_frag_key(struct kib_rdma_desc *rd, int index)
 {
        return rd->rd_key;
 }
 
 static inline int
-kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
+kiblnd_rd_consume_frag(struct kib_rdma_desc *rd, int index, __u32 nob)
 {
        if (nob < rd->rd_frags[index].rf_nob) {
                rd->rd_frags[index].rf_addr += nob;
@@ -915,14 +909,14 @@ kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
 }
 
 static inline int
-kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
+kiblnd_rd_msg_size(struct kib_rdma_desc *rd, int msgtype, int n)
 {
        LASSERT(msgtype == IBLND_MSG_GET_REQ ||
                msgtype == IBLND_MSG_PUT_ACK);
 
        return msgtype == IBLND_MSG_GET_REQ ?
-              offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
-              offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
+              offsetof(struct kib_get_msg, ibgm_rd.rd_frags[n]) :
+              offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[n]);
 }
 
 static inline __u64
@@ -981,17 +975,17 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
 #define KIBLND_CONN_PARAM(e)     ((e)->param.conn.private_data)
 #define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
 
-struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd,
                                    int negotiated_nfrags);
-void kiblnd_map_rx_descs(kib_conn_t *conn);
-void kiblnd_unmap_rx_descs(kib_conn_t *conn);
-void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
-struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
+void kiblnd_map_rx_descs(struct kib_conn *conn);
+void kiblnd_unmap_rx_descs(struct kib_conn *conn);
+void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node);
+struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps);
 
-int  kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
-                        kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
-                        kib_fmr_t *fmr);
-void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
+int  kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
+                        struct kib_rdma_desc *rd, __u32 nob, __u64 iov,
+                        struct kib_fmr *fmr);
+void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status);
 
 int kiblnd_tunables_setup(struct lnet_ni *ni);
 void kiblnd_tunables_init(void);
@@ -1001,30 +995,31 @@ int  kiblnd_scheduler(void *arg);
 int  kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
 int  kiblnd_failover_thread(void *arg);
 
-int  kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);
+int  kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages);
 
 int  kiblnd_cm_callback(struct rdma_cm_id *cmid,
                        struct rdma_cm_event *event);
 int  kiblnd_translate_mtu(int value);
 
-int  kiblnd_dev_failover(kib_dev_t *dev);
-int  kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
-void kiblnd_destroy_peer(kib_peer_t *peer);
-bool kiblnd_reconnect_peer(kib_peer_t *peer);
-void kiblnd_destroy_dev(kib_dev_t *dev);
-void kiblnd_unlink_peer_locked(kib_peer_t *peer);
-kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
-int  kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+int  kiblnd_dev_failover(struct kib_dev *dev);
+int  kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid);
+void kiblnd_destroy_peer(struct kib_peer *peer);
+bool kiblnd_reconnect_peer(struct kib_peer *peer);
+void kiblnd_destroy_dev(struct kib_dev *dev);
+void kiblnd_unlink_peer_locked(struct kib_peer *peer);
+struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid);
+int  kiblnd_close_stale_conns_locked(struct kib_peer *peer,
                                     int version, __u64 incarnation);
-int  kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);
+int  kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);
 
-kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
-                              int state, int version);
-void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn);
-void kiblnd_close_conn(kib_conn_t *conn, int error);
-void kiblnd_close_conn_locked(kib_conn_t *conn, int error);
+struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
+                                   struct rdma_cm_id *cmid,
+                                   int state, int version);
+void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn);
+void kiblnd_close_conn(struct kib_conn *conn, int error);
+void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
 
-void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
+void kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid);
 void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
                        int status);
 
@@ -1032,10 +1027,10 @@ void kiblnd_qp_event(struct ib_event *event, void *arg);
 void kiblnd_cq_event(struct ib_event *event, void *arg);
 void kiblnd_cq_completion(struct ib_cq *cq, void *arg);
 
-void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
+void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version,
                     int credits, lnet_nid_t dstnid, __u64 dststamp);
-int  kiblnd_unpack_msg(kib_msg_t *msg, int nob);
-int  kiblnd_post_rx(kib_rx_t *rx, int credit);
+int  kiblnd_unpack_msg(struct kib_msg *msg, int nob);
+int  kiblnd_post_rx(struct kib_rx *rx, int credit);
 
 int  kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
 int  kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
index 845e49a52430db2f091ed4ff7bbeeed8f52efad7..596a697b9d39db3865ca476ff2788b4c2cbefce1 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 
 #include "o2iblnd.h"
 
-static void kiblnd_peer_alive(kib_peer_t *peer);
-static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
-static void kiblnd_check_sends(kib_conn_t *conn);
-static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx,
+static void kiblnd_peer_alive(struct kib_peer *peer);
+static void kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error);
+static void kiblnd_check_sends(struct kib_conn *conn);
+static void kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx,
                                int type, int body_nob);
-static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
-                            int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
-static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
-static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
+static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+                            int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie);
+static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
+static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
+static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx);
 
 static void
-kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
+kiblnd_tx_done(lnet_ni_t *ni, struct kib_tx *tx)
 {
        lnet_msg_t *lntmsg[2];
-       kib_net_t *net = ni->ni_data;
+       struct kib_net *net = ni->ni_data;
        int rc;
        int i;
 
@@ -97,10 +93,10 @@ kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx)
 void
 kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
 {
-       kib_tx_t *tx;
+       struct kib_tx *tx;
 
        while (!list_empty(txlist)) {
-               tx = list_entry(txlist->next, kib_tx_t, tx_list);
+               tx = list_entry(txlist->next, struct kib_tx, tx_list);
 
                list_del(&tx->tx_list);
                /* complete now */
@@ -110,19 +106,19 @@ kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
        }
 }
 
-static kib_tx_t *
+static struct kib_tx *
 kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
 {
-       kib_net_t *net = (kib_net_t *)ni->ni_data;
+       struct kib_net *net = (struct kib_net *)ni->ni_data;
        struct list_head *node;
-       kib_tx_t *tx;
-       kib_tx_poolset_t *tps;
+       struct kib_tx *tx;
+       struct kib_tx_poolset *tps;
 
        tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)];
        node = kiblnd_pool_alloc_node(&tps->tps_poolset);
        if (!node)
                return NULL;
-       tx = list_entry(node, kib_tx_t, tx_list);
+       tx = list_entry(node, struct kib_tx, tx_list);
 
        LASSERT(!tx->tx_nwrq);
        LASSERT(!tx->tx_queued);
@@ -138,9 +134,9 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
 }
 
 static void
-kiblnd_drop_rx(kib_rx_t *rx)
+kiblnd_drop_rx(struct kib_rx *rx)
 {
-       kib_conn_t *conn = rx->rx_conn;
+       struct kib_conn *conn = rx->rx_conn;
        struct kib_sched_info *sched = conn->ibc_sched;
        unsigned long flags;
 
@@ -153,10 +149,10 @@ kiblnd_drop_rx(kib_rx_t *rx)
 }
 
 int
-kiblnd_post_rx(kib_rx_t *rx, int credit)
+kiblnd_post_rx(struct kib_rx *rx, int credit)
 {
-       kib_conn_t *conn = rx->rx_conn;
-       kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
+       struct kib_conn *conn = rx->rx_conn;
+       struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
        struct ib_recv_wr *bad_wrq = NULL;
        struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
        int rc;
@@ -223,13 +219,13 @@ out:
        return rc;
 }
 
-static kib_tx_t *
-kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
+static struct kib_tx *
+kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, __u64 cookie)
 {
        struct list_head *tmp;
 
        list_for_each(tmp, &conn->ibc_active_txs) {
-               kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
+               struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
 
                LASSERT(!tx->tx_queued);
                LASSERT(tx->tx_sending || tx->tx_waiting);
@@ -249,9 +245,9 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
 }
 
 static void
-kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
+kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, __u64 cookie)
 {
-       kib_tx_t *tx;
+       struct kib_tx *tx;
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
        int idle;
 
@@ -287,10 +283,10 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
 }
 
 static void
-kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
+kiblnd_send_completion(struct kib_conn *conn, int type, int status, __u64 cookie)
 {
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
-       kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+       struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
 
        if (!tx) {
                CERROR("Can't get tx for completion %x for %s\n",
@@ -300,19 +296,19 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
 
        tx->tx_msg->ibm_u.completion.ibcm_status = status;
        tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
-       kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));
+       kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));
 
        kiblnd_queue_tx(tx, conn);
 }
 
 static void
-kiblnd_handle_rx(kib_rx_t *rx)
+kiblnd_handle_rx(struct kib_rx *rx)
 {
-       kib_msg_t *msg = rx->rx_msg;
-       kib_conn_t *conn = rx->rx_conn;
+       struct kib_msg *msg = rx->rx_msg;
+       struct kib_conn *conn = rx->rx_conn;
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
        int credits = msg->ibm_credits;
-       kib_tx_t *tx;
+       struct kib_tx *tx;
        int rc = 0;
        int rc2;
        int post_credit;
@@ -467,12 +463,12 @@ kiblnd_handle_rx(kib_rx_t *rx)
 }
 
 static void
-kiblnd_rx_complete(kib_rx_t *rx, int status, int nob)
+kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
 {
-       kib_msg_t *msg = rx->rx_msg;
-       kib_conn_t *conn = rx->rx_conn;
+       struct kib_msg *msg = rx->rx_msg;
+       struct kib_conn *conn = rx->rx_conn;
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
-       kib_net_t *net = ni->ni_data;
+       struct kib_net *net = ni->ni_data;
        int rc;
        int err = -EIO;
 
@@ -561,10 +557,10 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
 }
 
 static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
+kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc *rd, __u32 nob)
 {
-       kib_hca_dev_t *hdev;
-       kib_fmr_poolset_t *fps;
+       struct kib_hca_dev *hdev;
+       struct kib_fmr_poolset *fps;
        int cpt;
        int rc;
 
@@ -593,9 +589,9 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
        return 0;
 }
 
-static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
+static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx)
 {
-       kib_net_t *net = ni->ni_data;
+       struct kib_net *net = ni->ni_data;
 
        LASSERT(net);
 
@@ -609,11 +605,11 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
        }
 }
 
-static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+static int kiblnd_map_tx(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
                         int nfrags)
 {
-       kib_net_t *net = ni->ni_data;
-       kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+       struct kib_net *net = ni->ni_data;
+       struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
        struct ib_mr *mr    = NULL;
        __u32 nob;
        int i;
@@ -651,10 +647,10 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 }
 
 static int
-kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+kiblnd_setup_rd_iov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
                    unsigned int niov, struct kvec *iov, int offset, int nob)
 {
-       kib_net_t *net = ni->ni_data;
+       struct kib_net *net = ni->ni_data;
        struct page *page;
        struct scatterlist *sg;
        unsigned long vaddr;
@@ -689,6 +685,10 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 
                sg_set_page(sg, page, fragnob, page_offset);
                sg = sg_next(sg);
+               if (!sg) {
+                       CERROR("lacking enough sg entries to map tx\n");
+                       return -EFAULT;
+               }
 
                if (offset + fragnob < iov->iov_len) {
                        offset += fragnob;
@@ -704,10 +704,10 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 }
 
 static int
-kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
+kiblnd_setup_rd_kiov(lnet_ni_t *ni, struct kib_tx *tx, struct kib_rdma_desc *rd,
                     int nkiov, lnet_kiov_t *kiov, int offset, int nob)
 {
-       kib_net_t *net = ni->ni_data;
+       struct kib_net *net = ni->ni_data;
        struct scatterlist *sg;
        int fragnob;
 
@@ -733,6 +733,10 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                sg_set_page(sg, kiov->kiov_page, fragnob,
                            kiov->kiov_offset + offset);
                sg = sg_next(sg);
+               if (!sg) {
+                       CERROR("lacking enough sg entries to map tx\n");
+                       return -EFAULT;
+               }
 
                offset = 0;
                kiov++;
@@ -744,11 +748,11 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
 }
 
 static int
-kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
+kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
        __must_hold(&conn->ibc_lock)
 {
-       kib_msg_t *msg = tx->tx_msg;
-       kib_peer_t *peer = conn->ibc_peer;
+       struct kib_msg *msg = tx->tx_msg;
+       struct kib_peer *peer = conn->ibc_peer;
        struct lnet_ni *ni = peer->ibp_ni;
        int ver = conn->ibc_version;
        int rc;
@@ -901,11 +905,11 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
 }
 
 static void
-kiblnd_check_sends(kib_conn_t *conn)
+kiblnd_check_sends(struct kib_conn *conn)
 {
        int ver = conn->ibc_version;
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
-       kib_tx_t *tx;
+       struct kib_tx *tx;
 
        /* Don't send anything until after the connection is established */
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
@@ -924,7 +928,7 @@ kiblnd_check_sends(kib_conn_t *conn)
        while (conn->ibc_reserved_credits > 0 &&
               !list_empty(&conn->ibc_tx_queue_rsrvd)) {
                tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
-                               kib_tx_t, tx_list);
+                               struct kib_tx, tx_list);
                list_del(&tx->tx_list);
                list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
                conn->ibc_reserved_credits--;
@@ -948,16 +952,16 @@ kiblnd_check_sends(kib_conn_t *conn)
                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                        credit = 0;
                        tx = list_entry(conn->ibc_tx_queue_nocred.next,
-                                       kib_tx_t, tx_list);
+                                       struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_noops)) {
                        LASSERT(!IBLND_OOB_CAPABLE(ver));
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_noops.next,
-                                       kib_tx_t, tx_list);
+                                       struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_queue)) {
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_queue.next,
-                                       kib_tx_t, tx_list);
+                                       struct kib_tx, tx_list);
                } else {
                        break;
                }
@@ -970,10 +974,10 @@ kiblnd_check_sends(kib_conn_t *conn)
 }
 
 static void
-kiblnd_tx_complete(kib_tx_t *tx, int status)
+kiblnd_tx_complete(struct kib_tx *tx, int status)
 {
        int failed = (status != IB_WC_SUCCESS);
-       kib_conn_t *conn = tx->tx_conn;
+       struct kib_conn *conn = tx->tx_conn;
        int idle;
 
        LASSERT(tx->tx_sending > 0);
@@ -1025,12 +1029,12 @@ kiblnd_tx_complete(kib_tx_t *tx, int status)
 }
 
 static void
-kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
+kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx, int type, int body_nob)
 {
-       kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+       struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
        struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
        struct ib_rdma_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
-       int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+       int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
        struct ib_mr *mr = hdev->ibh_mrs;
 
        LASSERT(tx->tx_nwrq >= 0);
@@ -1057,11 +1061,11 @@ kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
 }
 
 static int
-kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
-                int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+                int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie)
 {
-       kib_msg_t *ibmsg = tx->tx_msg;
-       kib_rdma_desc_t *srcrd = tx->tx_rd;
+       struct kib_msg *ibmsg = tx->tx_msg;
+       struct kib_rdma_desc *srcrd = tx->tx_rd;
        struct ib_sge *sge = &tx->tx_sge[0];
        struct ib_rdma_wr *wrq, *next;
        int rc  = resid;
@@ -1099,7 +1103,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
 
                wrknob = min(min(kiblnd_rd_frag_size(srcrd, srcidx),
                                 kiblnd_rd_frag_size(dstrd, dstidx)),
-                            (__u32) resid);
+                            (__u32)resid);
 
                sge = &tx->tx_sge[tx->tx_nwrq];
                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
@@ -1135,13 +1139,13 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
        ibmsg->ibm_u.completion.ibcm_status = rc;
        ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
        kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
-                          type, sizeof(kib_completion_msg_t));
+                          type, sizeof(struct kib_completion_msg));
 
        return rc;
 }
 
 static void
-kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
+kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
 {
        struct list_head *q;
 
@@ -1196,7 +1200,7 @@ kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
 }
 
 static void
-kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn)
+kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
 {
        spin_lock(&conn->ibc_lock);
        kiblnd_queue_tx_locked(tx, conn);
@@ -1243,11 +1247,11 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
 }
 
 static void
-kiblnd_connect_peer(kib_peer_t *peer)
+kiblnd_connect_peer(struct kib_peer *peer)
 {
        struct rdma_cm_id *cmid;
-       kib_dev_t *dev;
-       kib_net_t *net = peer->ibp_ni->ni_data;
+       struct kib_dev *dev;
+       struct kib_net *net = peer->ibp_ni->ni_data;
        struct sockaddr_in srcaddr;
        struct sockaddr_in dstaddr;
        int rc;
@@ -1311,7 +1315,7 @@ kiblnd_connect_peer(kib_peer_t *peer)
 }
 
 bool
-kiblnd_reconnect_peer(kib_peer_t *peer)
+kiblnd_reconnect_peer(struct kib_peer *peer)
 {
        rwlock_t *glock = &kiblnd_data.kib_global_lock;
        char *reason = NULL;
@@ -1361,11 +1365,11 @@ no_reconnect:
 }
 
 void
-kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
+kiblnd_launch_tx(lnet_ni_t *ni, struct kib_tx *tx, lnet_nid_t nid)
 {
-       kib_peer_t *peer;
-       kib_peer_t *peer2;
-       kib_conn_t *conn;
+       struct kib_peer *peer;
+       struct kib_peer *peer2;
+       struct kib_conn *conn;
        rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
        unsigned long flags;
        int rc;
@@ -1468,7 +1472,7 @@ kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
        peer->ibp_connecting = 1;
 
        /* always called with a ref on ni, which prevents ni being shutdown */
-       LASSERT(!((kib_net_t *)ni->ni_data)->ibn_shutdown);
+       LASSERT(!((struct kib_net *)ni->ni_data)->ibn_shutdown);
 
        if (tx)
                list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
@@ -1495,9 +1499,9 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
        lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
        unsigned int payload_offset = lntmsg->msg_offset;
        unsigned int payload_nob = lntmsg->msg_len;
-       kib_msg_t *ibmsg;
-       kib_rdma_desc_t  *rd;
-       kib_tx_t *tx;
+       struct kib_msg *ibmsg;
+       struct kib_rdma_desc  *rd;
+       struct kib_tx *tx;
        int nob;
        int rc;
 
@@ -1528,7 +1532,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                        break;            /* send IMMEDIATE */
 
                /* is the REPLY message too small for RDMA? */
-               nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
+               nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
                if (nob <= IBLND_MSG_SIZE)
                        break;            /* send IMMEDIATE */
 
@@ -1558,7 +1562,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                        return -EIO;
                }
 
-               nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
+               nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
                ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
                ibmsg->ibm_u.get.ibgm_hdr = *hdr;
 
@@ -1580,7 +1584,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
        case LNET_MSG_REPLY:
        case LNET_MSG_PUT:
                /* Is the payload small enough not to need RDMA? */
-               nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
+               nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
                if (nob <= IBLND_MSG_SIZE)
                        break;            /* send IMMEDIATE */
 
@@ -1610,7 +1614,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                ibmsg = tx->tx_msg;
                ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
                ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
-               kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
+               kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(struct kib_putreq_msg));
 
                tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
                tx->tx_waiting = 1;          /* waiting for PUT_{ACK,NAK} */
@@ -1620,7 +1624,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 
        /* send IMMEDIATE */
 
-       LASSERT(offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
+       LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
                 <= IBLND_MSG_SIZE);
 
        tx = kiblnd_get_idle_tx(ni, target.nid);
@@ -1635,16 +1639,16 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 
        if (payload_kiov)
                lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
-                                   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+                                   offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
                                    payload_niov, payload_kiov,
                                    payload_offset, payload_nob);
        else
                lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
-                                  offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+                                  offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
                                   payload_niov, payload_iov,
                                   payload_offset, payload_nob);
 
-       nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
+       nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
 
        tx->tx_lntmsg[0] = lntmsg;            /* finalise lntmsg on completion */
@@ -1653,7 +1657,7 @@ kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
 }
 
 static void
-kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
+kiblnd_reply(lnet_ni_t *ni, struct kib_rx *rx, lnet_msg_t *lntmsg)
 {
        lnet_process_id_t target = lntmsg->msg_target;
        unsigned int niov = lntmsg->msg_niov;
@@ -1661,7 +1665,7 @@ kiblnd_reply(lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
        lnet_kiov_t *kiov = lntmsg->msg_kiov;
        unsigned int offset = lntmsg->msg_offset;
        unsigned int nob = lntmsg->msg_len;
-       kib_tx_t *tx;
+       struct kib_tx *tx;
        int rc;
 
        tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
@@ -1718,10 +1722,10 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
            unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
            unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
-       kib_rx_t *rx = private;
-       kib_msg_t *rxmsg = rx->rx_msg;
-       kib_conn_t *conn = rx->rx_conn;
-       kib_tx_t *tx;
+       struct kib_rx *rx = private;
+       struct kib_msg *rxmsg = rx->rx_msg;
+       struct kib_conn *conn = rx->rx_conn;
+       struct kib_tx *tx;
        int nob;
        int post_credit = IBLND_POSTRX_PEER_CREDIT;
        int rc = 0;
@@ -1736,7 +1740,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                LBUG();
 
        case IBLND_MSG_IMMEDIATE:
-               nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
+               nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
                if (nob > rx->rx_nob) {
                        CERROR("Immediate message from %s too big: %d(%d)\n",
                               libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
@@ -1748,19 +1752,19 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                if (kiov)
                        lnet_copy_flat2kiov(niov, kiov, offset,
                                            IBLND_MSG_SIZE, rxmsg,
-                                           offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+                                           offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
                                            mlen);
                else
                        lnet_copy_flat2iov(niov, iov, offset,
                                           IBLND_MSG_SIZE, rxmsg,
-                                          offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+                                          offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
                                           mlen);
                lnet_finalize(ni, lntmsg, 0);
                break;
 
        case IBLND_MSG_PUT_REQ: {
-               kib_msg_t       *txmsg;
-               kib_rdma_desc_t *rd;
+               struct kib_msg  *txmsg;
+               struct kib_rdma_desc *rd;
 
                if (!mlen) {
                        lnet_finalize(ni, lntmsg, 0);
@@ -1796,7 +1800,7 @@ kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                        break;
                }
 
-               nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
+               nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
                txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
                txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
 
@@ -1847,7 +1851,7 @@ kiblnd_thread_fini(void)
 }
 
 static void
-kiblnd_peer_alive(kib_peer_t *peer)
+kiblnd_peer_alive(struct kib_peer *peer)
 {
        /* This is racy, but everyone's only writing cfs_time_current() */
        peer->ibp_last_alive = cfs_time_current();
@@ -1855,7 +1859,7 @@ kiblnd_peer_alive(kib_peer_t *peer)
 }
 
 static void
-kiblnd_peer_notify(kib_peer_t *peer)
+kiblnd_peer_notify(struct kib_peer *peer)
 {
        int error = 0;
        unsigned long last_alive = 0;
@@ -1878,7 +1882,7 @@ kiblnd_peer_notify(kib_peer_t *peer)
 }
 
 void
-kiblnd_close_conn_locked(kib_conn_t *conn, int error)
+kiblnd_close_conn_locked(struct kib_conn *conn, int error)
 {
        /*
         * This just does the immediate housekeeping. 'error' is zero for a
@@ -1888,8 +1892,8 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
         * already dealing with it (either to set it up or tear it down).
         * Caller holds kib_global_lock exclusively in irq context
         */
-       kib_peer_t *peer = conn->ibc_peer;
-       kib_dev_t *dev;
+       struct kib_peer *peer = conn->ibc_peer;
+       struct kib_dev *dev;
        unsigned long flags;
 
        LASSERT(error || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
@@ -1918,7 +1922,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
        }
 
-       dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
+       dev = ((struct kib_net *)peer->ibp_ni->ni_data)->ibn_dev;
        list_del(&conn->ibc_list);
        /* connd (see below) takes over ibc_list's ref */
 
@@ -1948,7 +1952,7 @@ kiblnd_close_conn_locked(kib_conn_t *conn, int error)
 }
 
 void
-kiblnd_close_conn(kib_conn_t *conn, int error)
+kiblnd_close_conn(struct kib_conn *conn, int error)
 {
        unsigned long flags;
 
@@ -1960,11 +1964,11 @@ kiblnd_close_conn(kib_conn_t *conn, int error)
 }
 
 static void
-kiblnd_handle_early_rxs(kib_conn_t *conn)
+kiblnd_handle_early_rxs(struct kib_conn *conn)
 {
        unsigned long flags;
-       kib_rx_t *rx;
-       kib_rx_t *tmp;
+       struct kib_rx *rx;
+       struct kib_rx *tmp;
 
        LASSERT(!in_interrupt());
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
@@ -1982,17 +1986,17 @@ kiblnd_handle_early_rxs(kib_conn_t *conn)
 }
 
 static void
-kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
+kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
 {
        LIST_HEAD(zombies);
        struct list_head *tmp;
        struct list_head *nxt;
-       kib_tx_t *tx;
+       struct kib_tx *tx;
 
        spin_lock(&conn->ibc_lock);
 
        list_for_each_safe(tmp, nxt, txs) {
-               tx = list_entry(tmp, kib_tx_t, tx_list);
+               tx = list_entry(tmp, struct kib_tx, tx_list);
 
                if (txs == &conn->ibc_active_txs) {
                        LASSERT(!tx->tx_queued);
@@ -2017,7 +2021,7 @@ kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
 }
 
 static void
-kiblnd_finalise_conn(kib_conn_t *conn)
+kiblnd_finalise_conn(struct kib_conn *conn)
 {
        LASSERT(!in_interrupt());
        LASSERT(conn->ibc_state > IBLND_CONN_INIT);
@@ -2045,7 +2049,7 @@ kiblnd_finalise_conn(kib_conn_t *conn)
 }
 
 static void
-kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
+kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error)
 {
        LIST_HEAD(zombies);
        unsigned long flags;
@@ -2099,11 +2103,11 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
 }
 
 static void
-kiblnd_connreq_done(kib_conn_t *conn, int status)
+kiblnd_connreq_done(struct kib_conn *conn, int status)
 {
-       kib_peer_t *peer = conn->ibc_peer;
-       kib_tx_t *tx;
-       kib_tx_t *tmp;
+       struct kib_peer *peer = conn->ibc_peer;
+       struct kib_tx *tx;
+       struct kib_tx *tmp;
        struct list_head txs;
        unsigned long flags;
        int active;
@@ -2209,7 +2213,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
 }
 
 static void
-kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
+kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
 {
        int rc;
 
@@ -2223,17 +2227,17 @@ static int
 kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 {
        rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
-       kib_msg_t *reqmsg = priv;
-       kib_msg_t *ackmsg;
-       kib_dev_t *ibdev;
-       kib_peer_t *peer;
-       kib_peer_t *peer2;
-       kib_conn_t *conn;
+       struct kib_msg *reqmsg = priv;
+       struct kib_msg *ackmsg;
+       struct kib_dev *ibdev;
+       struct kib_peer *peer;
+       struct kib_peer *peer2;
+       struct kib_conn *conn;
        lnet_ni_t *ni  = NULL;
-       kib_net_t *net = NULL;
+       struct kib_net *net = NULL;
        lnet_nid_t nid;
        struct rdma_conn_param cp;
-       kib_rej_t rej;
+       struct kib_rej rej;
        int version = IBLND_MSG_VERSION;
        unsigned long flags;
        int rc;
@@ -2242,7 +2246,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
        LASSERT(!in_interrupt());
 
        /* cmid inherits 'context' from the corresponding listener id */
-       ibdev = (kib_dev_t *)cmid->context;
+       ibdev = (struct kib_dev *)cmid->context;
        LASSERT(ibdev);
 
        memset(&rej, 0, sizeof(rej));
@@ -2260,7 +2264,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
                goto failed;
        }
 
-       if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
+       if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
                CERROR("Short connection request\n");
                goto failed;
        }
@@ -2295,7 +2299,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
        ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
 
        if (ni) {
-               net = (kib_net_t *)ni->ni_data;
+               net = (struct kib_net *)ni->ni_data;
                rej.ibr_incarnation = net->ibn_incarnation;
        }
 
@@ -2534,11 +2538,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 }
 
 static void
-kiblnd_check_reconnect(kib_conn_t *conn, int version,
-                      __u64 incarnation, int why, kib_connparams_t *cp)
+kiblnd_check_reconnect(struct kib_conn *conn, int version,
+                      __u64 incarnation, int why, struct kib_connparams *cp)
 {
        rwlock_t *glock = &kiblnd_data.kib_global_lock;
-       kib_peer_t *peer = conn->ibc_peer;
+       struct kib_peer *peer = conn->ibc_peer;
        char *reason;
        int msg_size = IBLND_MSG_SIZE;
        int frag_num = -1;
@@ -2647,9 +2651,9 @@ out:
 }
 
 static void
-kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
+kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
 {
-       kib_peer_t *peer = conn->ibc_peer;
+       struct kib_peer *peer = conn->ibc_peer;
 
        LASSERT(!in_interrupt());
        LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
@@ -2667,9 +2671,9 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
                break;
 
        case IB_CM_REJ_CONSUMER_DEFINED:
-               if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
-                       kib_rej_t *rej = priv;
-                       kib_connparams_t *cp = NULL;
+               if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
+                       struct kib_rej *rej = priv;
+                       struct kib_connparams *cp = NULL;
                        int flip = 0;
                        __u64 incarnation = -1;
 
@@ -2692,7 +2696,7 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
                                flip = 1;
                        }
 
-                       if (priv_nob >= sizeof(kib_rej_t) &&
+                       if (priv_nob >= sizeof(struct kib_rej) &&
                            rej->ibr_version > IBLND_MSG_VERSION_1) {
                                /*
                                 * priv_nob is always 148 in current version
@@ -2775,12 +2779,12 @@ kiblnd_rejected(kib_conn_t *conn, int reason, void *priv, int priv_nob)
 }
 
 static void
-kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
+kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
 {
-       kib_peer_t *peer = conn->ibc_peer;
+       struct kib_peer *peer = conn->ibc_peer;
        lnet_ni_t *ni = peer->ibp_ni;
-       kib_net_t *net = ni->ni_data;
-       kib_msg_t *msg = priv;
+       struct kib_net *net = ni->ni_data;
+       struct kib_msg *msg = priv;
        int ver = conn->ibc_version;
        int rc = kiblnd_unpack_msg(msg, priv_nob);
        unsigned long flags;
@@ -2877,9 +2881,9 @@ kiblnd_check_connreply(kib_conn_t *conn, void *priv, int priv_nob)
 static int
 kiblnd_active_connect(struct rdma_cm_id *cmid)
 {
-       kib_peer_t *peer = (kib_peer_t *)cmid->context;
-       kib_conn_t *conn;
-       kib_msg_t *msg;
+       struct kib_peer *peer = (struct kib_peer *)cmid->context;
+       struct kib_conn *conn;
+       struct kib_msg *msg;
        struct rdma_conn_param cp;
        int version;
        __u64 incarnation;
@@ -2944,8 +2948,8 @@ kiblnd_active_connect(struct rdma_cm_id *cmid)
 int
 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 {
-       kib_peer_t *peer;
-       kib_conn_t *conn;
+       struct kib_peer *peer;
+       struct kib_conn *conn;
        int rc;
 
        switch (event->event) {
@@ -2963,7 +2967,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                return rc;
 
        case RDMA_CM_EVENT_ADDR_ERROR:
-               peer = (kib_peer_t *)cmid->context;
+               peer = (struct kib_peer *)cmid->context;
                CNETERR("%s: ADDR ERROR %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
@@ -2971,7 +2975,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                return -EHOSTUNREACH;      /* rc destroys cmid */
 
        case RDMA_CM_EVENT_ADDR_RESOLVED:
-               peer = (kib_peer_t *)cmid->context;
+               peer = (struct kib_peer *)cmid->context;
 
                CDEBUG(D_NET, "%s Addr resolved: %d\n",
                       libcfs_nid2str(peer->ibp_nid), event->status);
@@ -2994,7 +2998,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                return rc;                    /* rc destroys cmid */
 
        case RDMA_CM_EVENT_ROUTE_ERROR:
-               peer = (kib_peer_t *)cmid->context;
+               peer = (struct kib_peer *)cmid->context;
                CNETERR("%s: ROUTE ERROR %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
@@ -3002,7 +3006,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                return -EHOSTUNREACH;      /* rc destroys cmid */
 
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
-               peer = (kib_peer_t *)cmid->context;
+               peer = (struct kib_peer *)cmid->context;
                CDEBUG(D_NET, "%s Route resolved: %d\n",
                       libcfs_nid2str(peer->ibp_nid), event->status);
 
@@ -3016,7 +3020,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                return event->status;      /* rc destroys cmid */
 
        case RDMA_CM_EVENT_UNREACHABLE:
-               conn = (kib_conn_t *)cmid->context;
+               conn = (struct kib_conn *)cmid->context;
                LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                        conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
                CNETERR("%s: UNREACHABLE %d\n",
@@ -3026,7 +3030,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                return 0;
 
        case RDMA_CM_EVENT_CONNECT_ERROR:
-               conn = (kib_conn_t *)cmid->context;
+               conn = (struct kib_conn *)cmid->context;
                LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                        conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
                CNETERR("%s: CONNECT ERROR %d\n",
@@ -3036,7 +3040,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                return 0;
 
        case RDMA_CM_EVENT_REJECTED:
-               conn = (kib_conn_t *)cmid->context;
+               conn = (struct kib_conn *)cmid->context;
                switch (conn->ibc_state) {
                default:
                        LBUG();
@@ -3058,7 +3062,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                return 0;
 
        case RDMA_CM_EVENT_ESTABLISHED:
-               conn = (kib_conn_t *)cmid->context;
+               conn = (struct kib_conn *)cmid->context;
                switch (conn->ibc_state) {
                default:
                        LBUG();
@@ -3084,7 +3088,7 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
                return 0;
        case RDMA_CM_EVENT_DISCONNECTED:
-               conn = (kib_conn_t *)cmid->context;
+               conn = (struct kib_conn *)cmid->context;
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        CERROR("%s DISCONNECTED\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
@@ -3113,13 +3117,13 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 }
 
 static int
-kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
+kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
 {
-       kib_tx_t *tx;
+       struct kib_tx *tx;
        struct list_head *ttmp;
 
        list_for_each(ttmp, txs) {
-               tx = list_entry(ttmp, kib_tx_t, tx_list);
+               tx = list_entry(ttmp, struct kib_tx, tx_list);
 
                if (txs != &conn->ibc_active_txs) {
                        LASSERT(tx->tx_queued);
@@ -3140,7 +3144,7 @@ kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
 }
 
 static int
-kiblnd_conn_timed_out_locked(kib_conn_t *conn)
+kiblnd_conn_timed_out_locked(struct kib_conn *conn)
 {
        return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
                kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
@@ -3156,10 +3160,10 @@ kiblnd_check_conns(int idx)
        LIST_HEAD(checksends);
        struct list_head *peers = &kiblnd_data.kib_peers[idx];
        struct list_head *ptmp;
-       kib_peer_t *peer;
-       kib_conn_t *conn;
-       kib_conn_t *temp;
-       kib_conn_t *tmp;
+       struct kib_peer *peer;
+       struct kib_conn *conn;
+       struct kib_conn *temp;
+       struct kib_conn *tmp;
        struct list_head *ctmp;
        unsigned long flags;
 
@@ -3171,13 +3175,13 @@ kiblnd_check_conns(int idx)
        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
        list_for_each(ptmp, peers) {
-               peer = list_entry(ptmp, kib_peer_t, ibp_list);
+               peer = list_entry(ptmp, struct kib_peer, ibp_list);
 
                list_for_each(ctmp, &peer->ibp_conns) {
                        int timedout;
                        int sendnoop;
 
-                       conn = list_entry(ctmp, kib_conn_t, ibc_list);
+                       conn = list_entry(ctmp, struct kib_conn, ibc_list);
 
                        LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
 
@@ -3235,7 +3239,7 @@ kiblnd_check_conns(int idx)
 }
 
 static void
-kiblnd_disconnect_conn(kib_conn_t *conn)
+kiblnd_disconnect_conn(struct kib_conn *conn)
 {
        LASSERT(!in_interrupt());
        LASSERT(current == kiblnd_data.kib_connd);
@@ -3264,7 +3268,7 @@ kiblnd_connd(void *arg)
        spinlock_t *lock= &kiblnd_data.kib_connd_lock;
        wait_queue_t wait;
        unsigned long flags;
-       kib_conn_t *conn;
+       struct kib_conn *conn;
        int timeout;
        int i;
        int dropped_lock;
@@ -3284,10 +3288,10 @@ kiblnd_connd(void *arg)
                dropped_lock = 0;
 
                if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
-                       kib_peer_t *peer = NULL;
+                       struct kib_peer *peer = NULL;
 
                        conn = list_entry(kiblnd_data.kib_connd_zombies.next,
-                                         kib_conn_t, ibc_list);
+                                         struct kib_conn, ibc_list);
                        list_del(&conn->ibc_list);
                        if (conn->ibc_reconnect) {
                                peer = conn->ibc_peer;
@@ -3314,7 +3318,7 @@ kiblnd_connd(void *arg)
 
                if (!list_empty(&kiblnd_data.kib_connd_conns)) {
                        conn = list_entry(kiblnd_data.kib_connd_conns.next,
-                                         kib_conn_t, ibc_list);
+                                         struct kib_conn, ibc_list);
                        list_del(&conn->ibc_list);
 
                        spin_unlock_irqrestore(lock, flags);
@@ -3338,7 +3342,7 @@ kiblnd_connd(void *arg)
                                break;
 
                        conn = list_entry(kiblnd_data.kib_reconn_list.next,
-                                         kib_conn_t, ibc_list);
+                                         struct kib_conn, ibc_list);
                        list_del(&conn->ibc_list);
 
                        spin_unlock_irqrestore(lock, flags);
@@ -3409,7 +3413,7 @@ kiblnd_connd(void *arg)
 void
 kiblnd_qp_event(struct ib_event *event, void *arg)
 {
-       kib_conn_t *conn = arg;
+       struct kib_conn *conn = arg;
 
        switch (event->event) {
        case IB_EVENT_COMM_EST:
@@ -3471,7 +3475,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
         * occurred.  But in this case, !ibc_nrx && !ibc_nsends_posted
         * and this CQ is about to be destroyed so I NOOP.
         */
-       kib_conn_t *conn = arg;
+       struct kib_conn *conn = arg;
        struct kib_sched_info *sched = conn->ibc_sched;
        unsigned long flags;
 
@@ -3498,7 +3502,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg)
 void
 kiblnd_cq_event(struct ib_event *event, void *arg)
 {
-       kib_conn_t *conn = arg;
+       struct kib_conn *conn = arg;
 
        CERROR("%s: async CQ event type %d\n",
               libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
@@ -3509,7 +3513,7 @@ kiblnd_scheduler(void *arg)
 {
        long id = (long)arg;
        struct kib_sched_info *sched;
-       kib_conn_t *conn;
+       struct kib_conn *conn;
        wait_queue_t wait;
        unsigned long flags;
        struct ib_wc wc;
@@ -3544,7 +3548,7 @@ kiblnd_scheduler(void *arg)
                did_something = 0;
 
                if (!list_empty(&sched->ibs_conns)) {
-                       conn = list_entry(sched->ibs_conns.next, kib_conn_t,
+                       conn = list_entry(sched->ibs_conns.next, struct kib_conn,
                                          ibc_sched_list);
                        /* take over kib_sched_conns' ref on conn... */
                        LASSERT(conn->ibc_scheduled);
@@ -3644,7 +3648,7 @@ int
 kiblnd_failover_thread(void *arg)
 {
        rwlock_t *glock = &kiblnd_data.kib_global_lock;
-       kib_dev_t *dev;
+       struct kib_dev *dev;
        wait_queue_t wait;
        unsigned long flags;
        int rc;
index f8fdd4ae3dbf7d30e32b2193f7806c695b37fa90..44e960f60833f66361412ac4037b436a2cb04320 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -145,7 +141,7 @@ static int use_privileged_port = 1;
 module_param(use_privileged_port, int, 0644);
 MODULE_PARM_DESC(use_privileged_port, "use privileged port when initiating connection");
 
-kib_tunables_t kiblnd_tunables = {
+struct kib_tunables kiblnd_tunables = {
        .kib_dev_failover      = &dev_failover,
        .kib_service           = &service,
        .kib_cksum             = &cksum,
index 406c0e7a57b919ca24a320610b9035b4f389d766..07ec540946cdcc41aa17e6f884b4050815f34580 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 #include "socklnd.h"
 
 static lnd_t the_ksocklnd;
-ksock_nal_data_t ksocknal_data;
+struct ksock_nal_data ksocknal_data;
 
-static ksock_interface_t *
+static struct ksock_interface *
 ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
 {
-       ksock_net_t *net = ni->ni_data;
+       struct ksock_net *net = ni->ni_data;
        int i;
-       ksock_interface_t *iface;
+       struct ksock_interface *iface;
 
        for (i = 0; i < net->ksnn_ninterfaces; i++) {
                LASSERT(i < LNET_MAX_INTERFACES);
@@ -64,10 +60,10 @@ ksocknal_ip2iface(lnet_ni_t *ni, __u32 ip)
        return NULL;
 }
 
-static ksock_route_t *
+static struct ksock_route *
 ksocknal_create_route(__u32 ipaddr, int port)
 {
-       ksock_route_t *route;
+       struct ksock_route *route;
 
        LIBCFS_ALLOC(route, sizeof(*route));
        if (!route)
@@ -89,7 +85,7 @@ ksocknal_create_route(__u32 ipaddr, int port)
 }
 
 void
-ksocknal_destroy_route(ksock_route_t *route)
+ksocknal_destroy_route(struct ksock_route *route)
 {
        LASSERT(!atomic_read(&route->ksnr_refcount));
 
@@ -100,11 +96,11 @@ ksocknal_destroy_route(ksock_route_t *route)
 }
 
 static int
-ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 {
        int cpt = lnet_cpt_of_nid(id.nid);
-       ksock_net_t *net = ni->ni_data;
-       ksock_peer_t *peer;
+       struct ksock_net *net = ni->ni_data;
+       struct ksock_peer *peer;
 
        LASSERT(id.nid != LNET_NID_ANY);
        LASSERT(id.pid != LNET_PID_ANY);
@@ -148,9 +144,9 @@ ksocknal_create_peer(ksock_peer_t **peerp, lnet_ni_t *ni, lnet_process_id_t id)
 }
 
 void
-ksocknal_destroy_peer(ksock_peer_t *peer)
+ksocknal_destroy_peer(struct ksock_peer *peer)
 {
-       ksock_net_t *net = peer->ksnp_ni->ni_data;
+       struct ksock_net *net = peer->ksnp_ni->ni_data;
 
        CDEBUG(D_NET, "peer %s %p deleted\n",
               libcfs_id2str(peer->ksnp_id), peer);
@@ -175,15 +171,15 @@ ksocknal_destroy_peer(ksock_peer_t *peer)
        spin_unlock_bh(&net->ksnn_lock);
 }
 
-ksock_peer_t *
+struct ksock_peer *
 ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
 {
        struct list_head *peer_list = ksocknal_nid2peerlist(id.nid);
        struct list_head *tmp;
-       ksock_peer_t *peer;
+       struct ksock_peer *peer;
 
        list_for_each(tmp, peer_list) {
-               peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+               peer = list_entry(tmp, struct ksock_peer, ksnp_list);
 
                LASSERT(!peer->ksnp_closing);
 
@@ -202,10 +198,10 @@ ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id)
        return NULL;
 }
 
-ksock_peer_t *
+struct ksock_peer *
 ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
 {
-       ksock_peer_t *peer;
+       struct ksock_peer *peer;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
        peer = ksocknal_find_peer_locked(ni, id);
@@ -217,11 +213,11 @@ ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id)
 }
 
 static void
-ksocknal_unlink_peer_locked(ksock_peer_t *peer)
+ksocknal_unlink_peer_locked(struct ksock_peer *peer)
 {
        int i;
        __u32 ip;
-       ksock_interface_t *iface;
+       struct ksock_interface *iface;
 
        for (i = 0; i < peer->ksnp_n_passive_ips; i++) {
                LASSERT(i < LNET_MAX_INTERFACES);
@@ -253,9 +249,9 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
                       lnet_process_id_t *id, __u32 *myip, __u32 *peer_ip,
                       int *port, int *conn_count, int *share_count)
 {
-       ksock_peer_t *peer;
+       struct ksock_peer *peer;
        struct list_head *ptmp;
-       ksock_route_t *route;
+       struct ksock_route *route;
        struct list_head *rtmp;
        int i;
        int j;
@@ -265,7 +261,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
 
        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
-                       peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+                       peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
 
                        if (peer->ksnp_ni != ni)
                                continue;
@@ -303,7 +299,7 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
                                if (index-- > 0)
                                        continue;
 
-                               route = list_entry(rtmp, ksock_route_t,
+                               route = list_entry(rtmp, struct ksock_route,
                                                   ksnr_list);
 
                                *id = peer->ksnp_id;
@@ -323,11 +319,11 @@ ksocknal_get_peer_info(lnet_ni_t *ni, int index,
 }
 
 static void
-ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
 {
-       ksock_peer_t *peer = route->ksnr_peer;
+       struct ksock_peer *peer = route->ksnr_peer;
        int type = conn->ksnc_type;
-       ksock_interface_t *iface;
+       struct ksock_interface *iface;
 
        conn->ksnc_route = route;
        ksocknal_route_addref(route);
@@ -369,11 +365,11 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
 }
 
 static void
-ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
+ksocknal_add_route_locked(struct ksock_peer *peer, struct ksock_route *route)
 {
        struct list_head *tmp;
-       ksock_conn_t *conn;
-       ksock_route_t *route2;
+       struct ksock_conn *conn;
+       struct ksock_route *route2;
 
        LASSERT(!peer->ksnp_closing);
        LASSERT(!route->ksnr_peer);
@@ -383,7 +379,7 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
 
        /* LASSERT(unique) */
        list_for_each(tmp, &peer->ksnp_routes) {
-               route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+               route2 = list_entry(tmp, struct ksock_route, ksnr_list);
 
                if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
                        CERROR("Duplicate route %s %pI4h\n",
@@ -399,7 +395,7 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
        list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
 
        list_for_each(tmp, &peer->ksnp_conns) {
-               conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+               conn = list_entry(tmp, struct ksock_conn, ksnc_list);
 
                if (conn->ksnc_ipaddr != route->ksnr_ipaddr)
                        continue;
@@ -410,11 +406,11 @@ ksocknal_add_route_locked(ksock_peer_t *peer, ksock_route_t *route)
 }
 
 static void
-ksocknal_del_route_locked(ksock_route_t *route)
+ksocknal_del_route_locked(struct ksock_route *route)
 {
-       ksock_peer_t *peer = route->ksnr_peer;
-       ksock_interface_t *iface;
-       ksock_conn_t *conn;
+       struct ksock_peer *peer = route->ksnr_peer;
+       struct ksock_interface *iface;
+       struct ksock_conn *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
 
@@ -422,7 +418,7 @@ ksocknal_del_route_locked(ksock_route_t *route)
 
        /* Close associated conns */
        list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
-               conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+               conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
 
                if (conn->ksnc_route != route)
                        continue;
@@ -455,10 +451,10 @@ int
 ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
 {
        struct list_head *tmp;
-       ksock_peer_t *peer;
-       ksock_peer_t *peer2;
-       ksock_route_t *route;
-       ksock_route_t *route2;
+       struct ksock_peer *peer;
+       struct ksock_peer *peer2;
+       struct ksock_route *route;
+       struct ksock_route *route2;
        int rc;
 
        if (id.nid == LNET_NID_ANY ||
@@ -479,7 +475,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
 
        /* always called with a ref on ni, so shutdown can't have started */
-       LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
+       LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
 
        peer2 = ksocknal_find_peer_locked(ni, id);
        if (peer2) {
@@ -493,7 +489,7 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
 
        route2 = NULL;
        list_for_each(tmp, &peer->ksnp_routes) {
-               route2 = list_entry(tmp, ksock_route_t, ksnr_list);
+               route2 = list_entry(tmp, struct ksock_route, ksnr_list);
 
                if (route2->ksnr_ipaddr == ipaddr)
                        break;
@@ -514,10 +510,10 @@ ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ipaddr, int port)
 }
 
 static void
-ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
+ksocknal_del_peer_locked(struct ksock_peer *peer, __u32 ip)
 {
-       ksock_conn_t *conn;
-       ksock_route_t *route;
+       struct ksock_conn *conn;
+       struct ksock_route *route;
        struct list_head *tmp;
        struct list_head *nxt;
        int nshared;
@@ -528,7 +524,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
        ksocknal_peer_addref(peer);
 
        list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
-               route = list_entry(tmp, ksock_route_t, ksnr_list);
+               route = list_entry(tmp, struct ksock_route, ksnr_list);
 
                /* no match */
                if (!(!ip || route->ksnr_ipaddr == ip))
@@ -541,7 +537,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
 
        nshared = 0;
        list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
-               route = list_entry(tmp, ksock_route_t, ksnr_list);
+               route = list_entry(tmp, struct ksock_route, ksnr_list);
                nshared += route->ksnr_share_count;
        }
 
@@ -551,7 +547,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
                 * left
                 */
                list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
-                       route = list_entry(tmp, ksock_route_t, ksnr_list);
+                       route = list_entry(tmp, struct ksock_route, ksnr_list);
 
                        /* we should only be removing auto-entries */
                        LASSERT(!route->ksnr_share_count);
@@ -559,7 +555,7 @@ ksocknal_del_peer_locked(ksock_peer_t *peer, __u32 ip)
                }
 
                list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
-                       conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+                       conn = list_entry(tmp, struct ksock_conn, ksnc_list);
 
                        ksocknal_close_conn_locked(conn, 0);
                }
@@ -575,7 +571,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
        LIST_HEAD(zombies);
        struct list_head *ptmp;
        struct list_head *pnxt;
-       ksock_peer_t *peer;
+       struct ksock_peer *peer;
        int lo;
        int hi;
        int i;
@@ -593,7 +589,7 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
 
        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt, &ksocknal_data.ksnd_peers[i]) {
-                       peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+                       peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
 
                        if (peer->ksnp_ni != ni)
                                continue;
@@ -628,12 +624,12 @@ ksocknal_del_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip)
        return rc;
 }
 
-static ksock_conn_t *
+static struct ksock_conn *
 ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
 {
-       ksock_peer_t *peer;
+       struct ksock_peer *peer;
        struct list_head *ptmp;
-       ksock_conn_t *conn;
+       struct ksock_conn *conn;
        struct list_head *ctmp;
        int i;
 
@@ -641,7 +637,7 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
 
        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
-                       peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+                       peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
 
                        LASSERT(!peer->ksnp_closing);
 
@@ -652,7 +648,7 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
                                if (index-- > 0)
                                        continue;
 
-                               conn = list_entry(ctmp, ksock_conn_t,
+                               conn = list_entry(ctmp, struct ksock_conn,
                                                  ksnc_list);
                                ksocknal_conn_addref(conn);
                                read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -665,11 +661,11 @@ ksocknal_get_conn_by_idx(lnet_ni_t *ni, int index)
        return NULL;
 }
 
-static ksock_sched_t *
+static struct ksock_sched *
 ksocknal_choose_scheduler_locked(unsigned int cpt)
 {
        struct ksock_sched_info *info = ksocknal_data.ksnd_sched_info[cpt];
-       ksock_sched_t *sched;
+       struct ksock_sched *sched;
        int i;
 
        LASSERT(info->ksi_nthreads > 0);
@@ -691,7 +687,7 @@ ksocknal_choose_scheduler_locked(unsigned int cpt)
 static int
 ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
 {
-       ksock_net_t *net = ni->ni_data;
+       struct ksock_net *net = ni->ni_data;
        int i;
        int nip;
 
@@ -719,7 +715,7 @@ ksocknal_local_ipvec(lnet_ni_t *ni, __u32 *ipaddrs)
 }
 
 static int
-ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
+ksocknal_match_peerip(struct ksock_interface *iface, __u32 *ips, int nips)
 {
        int best_netmatch = 0;
        int best_xor      = 0;
@@ -751,12 +747,12 @@ ksocknal_match_peerip(ksock_interface_t *iface, __u32 *ips, int nips)
 }
 
 static int
-ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
+ksocknal_select_ips(struct ksock_peer *peer, __u32 *peerips, int n_peerips)
 {
        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
-       ksock_net_t *net = peer->ksnp_ni->ni_data;
-       ksock_interface_t *iface;
-       ksock_interface_t *best_iface;
+       struct ksock_net *net = peer->ksnp_ni->ni_data;
+       struct ksock_interface *iface;
+       struct ksock_interface *best_iface;
        int n_ips;
        int i;
        int j;
@@ -862,17 +858,17 @@ ksocknal_select_ips(ksock_peer_t *peer, __u32 *peerips, int n_peerips)
 }
 
 static void
-ksocknal_create_routes(ksock_peer_t *peer, int port,
+ksocknal_create_routes(struct ksock_peer *peer, int port,
                       __u32 *peer_ipaddrs, int npeer_ipaddrs)
 {
-       ksock_route_t *newroute = NULL;
+       struct ksock_route *newroute = NULL;
        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
        lnet_ni_t *ni = peer->ksnp_ni;
-       ksock_net_t *net = ni->ni_data;
+       struct ksock_net *net = ni->ni_data;
        struct list_head *rtmp;
-       ksock_route_t *route;
-       ksock_interface_t *iface;
-       ksock_interface_t *best_iface;
+       struct ksock_route *route;
+       struct ksock_interface *iface;
+       struct ksock_interface *best_iface;
        int best_netmatch;
        int this_netmatch;
        int best_nroutes;
@@ -919,7 +915,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
                /* Already got a route? */
                route = NULL;
                list_for_each(rtmp, &peer->ksnp_routes) {
-                       route = list_entry(rtmp, ksock_route_t, ksnr_list);
+                       route = list_entry(rtmp, struct ksock_route, ksnr_list);
 
                        if (route->ksnr_ipaddr == newroute->ksnr_ipaddr)
                                break;
@@ -941,7 +937,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
 
                        /* Using this interface already? */
                        list_for_each(rtmp, &peer->ksnp_routes) {
-                               route = list_entry(rtmp, ksock_route_t,
+                               route = list_entry(rtmp, struct ksock_route,
                                                   ksnr_list);
 
                                if (route->ksnr_myipaddr == iface->ksni_ipaddr)
@@ -985,7 +981,7 @@ ksocknal_create_routes(ksock_peer_t *peer, int port,
 int
 ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
 {
-       ksock_connreq_t *cr;
+       struct ksock_connreq *cr;
        int rc;
        __u32 peer_ip;
        int peer_port;
@@ -1014,9 +1010,9 @@ ksocknal_accept(lnet_ni_t *ni, struct socket *sock)
 }
 
 static int
-ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
+ksocknal_connecting(struct ksock_peer *peer, __u32 ipaddr)
 {
-       ksock_route_t *route;
+       struct ksock_route *route;
 
        list_for_each_entry(route, &peer->ksnp_routes, ksnr_list) {
                if (route->ksnr_ipaddr == ipaddr)
@@ -1026,7 +1022,7 @@ ksocknal_connecting(ksock_peer_t *peer, __u32 ipaddr)
 }
 
 int
-ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
+ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
                     struct socket *sock, int type)
 {
        rwlock_t *global_lock = &ksocknal_data.ksnd_global_lock;
@@ -1034,15 +1030,15 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
        lnet_process_id_t peerid;
        struct list_head *tmp;
        __u64 incarnation;
-       ksock_conn_t *conn;
-       ksock_conn_t *conn2;
-       ksock_peer_t *peer = NULL;
-       ksock_peer_t *peer2;
-       ksock_sched_t *sched;
+       struct ksock_conn *conn;
+       struct ksock_conn *conn2;
+       struct ksock_peer *peer = NULL;
+       struct ksock_peer *peer2;
+       struct ksock_sched *sched;
        ksock_hello_msg_t *hello;
        int cpt;
-       ksock_tx_t *tx;
-       ksock_tx_t *txtmp;
+       struct ksock_tx *tx;
+       struct ksock_tx *txtmp;
        int rc;
        int active;
        char *warn = NULL;
@@ -1150,7 +1146,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
                write_lock_bh(global_lock);
 
                /* called with a ref on ni, so shutdown can't have started */
-               LASSERT(!((ksock_net_t *) ni->ni_data)->ksnn_shutdown);
+               LASSERT(!((struct ksock_net *)ni->ni_data)->ksnn_shutdown);
 
                peer2 = ksocknal_find_peer_locked(ni, peerid);
                if (!peer2) {
@@ -1233,7 +1229,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
         */
        if (conn->ksnc_ipaddr != conn->ksnc_myipaddr) {
                list_for_each(tmp, &peer->ksnp_conns) {
-                       conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+                       conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
 
                        if (conn2->ksnc_ipaddr != conn->ksnc_ipaddr ||
                            conn2->ksnc_myipaddr != conn->ksnc_myipaddr ||
@@ -1273,7 +1269,7 @@ ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
         * continually create duplicate routes.
         */
        list_for_each(tmp, &peer->ksnp_routes) {
-               route = list_entry(tmp, ksock_route_t, ksnr_list);
+               route = list_entry(tmp, struct ksock_route, ksnr_list);
 
                if (route->ksnr_ipaddr != conn->ksnc_ipaddr)
                        continue;
@@ -1432,16 +1428,16 @@ failed_0:
 }
 
 void
-ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
+ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
 {
        /*
         * This just does the immmediate housekeeping, and queues the
         * connection for the reaper to terminate.
         * Caller holds ksnd_global_lock exclusively in irq context
         */
-       ksock_peer_t *peer = conn->ksnc_peer;
-       ksock_route_t *route;
-       ksock_conn_t *conn2;
+       struct ksock_peer *peer = conn->ksnc_peer;
+       struct ksock_route *route;
+       struct ksock_conn *conn2;
        struct list_head *tmp;
 
        LASSERT(!peer->ksnp_error);
@@ -1459,7 +1455,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
 
                conn2 = NULL;
                list_for_each(tmp, &peer->ksnp_conns) {
-                       conn2 = list_entry(tmp, ksock_conn_t, ksnc_list);
+                       conn2 = list_entry(tmp, struct ksock_conn, ksnc_list);
 
                        if (conn2->ksnc_route == route &&
                            conn2->ksnc_type == conn->ksnc_type)
@@ -1484,7 +1480,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
                /* No more connections to this peer */
 
                if (!list_empty(&peer->ksnp_tx_queue)) {
-                       ksock_tx_t *tx;
+                       struct ksock_tx *tx;
 
                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
 
@@ -1524,7 +1520,7 @@ ksocknal_close_conn_locked(ksock_conn_t *conn, int error)
 }
 
 void
-ksocknal_peer_failed(ksock_peer_t *peer)
+ksocknal_peer_failed(struct ksock_peer *peer)
 {
        int notify = 0;
        unsigned long last_alive = 0;
@@ -1552,12 +1548,12 @@ ksocknal_peer_failed(ksock_peer_t *peer)
 }
 
 void
-ksocknal_finalize_zcreq(ksock_conn_t *conn)
+ksocknal_finalize_zcreq(struct ksock_conn *conn)
 {
-       ksock_peer_t *peer = conn->ksnc_peer;
-       ksock_tx_t *tx;
-       ksock_tx_t *temp;
-       ksock_tx_t *tmp;
+       struct ksock_peer *peer = conn->ksnc_peer;
+       struct ksock_tx *tx;
+       struct ksock_tx *temp;
+       struct ksock_tx *tmp;
        LIST_HEAD(zlist);
 
        /*
@@ -1589,7 +1585,7 @@ ksocknal_finalize_zcreq(ksock_conn_t *conn)
 }
 
 void
-ksocknal_terminate_conn(ksock_conn_t *conn)
+ksocknal_terminate_conn(struct ksock_conn *conn)
 {
        /*
         * This gets called by the reaper (guaranteed thread context) to
@@ -1597,8 +1593,8 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
         * ksnc_refcount will eventually hit zero, and then the reaper will
         * destroy it.
         */
-       ksock_peer_t *peer = conn->ksnc_peer;
-       ksock_sched_t *sched = conn->ksnc_scheduler;
+       struct ksock_peer *peer = conn->ksnc_peer;
+       struct ksock_sched *sched = conn->ksnc_scheduler;
        int failed = 0;
 
        LASSERT(conn->ksnc_closing);
@@ -1656,7 +1652,7 @@ ksocknal_terminate_conn(ksock_conn_t *conn)
 }
 
 void
-ksocknal_queue_zombie_conn(ksock_conn_t *conn)
+ksocknal_queue_zombie_conn(struct ksock_conn *conn)
 {
        /* Queue the conn for the reaper to destroy */
 
@@ -1670,7 +1666,7 @@ ksocknal_queue_zombie_conn(ksock_conn_t *conn)
 }
 
 void
-ksocknal_destroy_conn(ksock_conn_t *conn)
+ksocknal_destroy_conn(struct ksock_conn *conn)
 {
        unsigned long last_rcv;
 
@@ -1730,15 +1726,15 @@ ksocknal_destroy_conn(ksock_conn_t *conn)
 }
 
 int
-ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
+ksocknal_close_peer_conns_locked(struct ksock_peer *peer, __u32 ipaddr, int why)
 {
-       ksock_conn_t *conn;
+       struct ksock_conn *conn;
        struct list_head *ctmp;
        struct list_head *cnxt;
        int count = 0;
 
        list_for_each_safe(ctmp, cnxt, &peer->ksnp_conns) {
-               conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+               conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
 
                if (!ipaddr || conn->ksnc_ipaddr == ipaddr) {
                        count++;
@@ -1750,9 +1746,9 @@ ksocknal_close_peer_conns_locked(ksock_peer_t *peer, __u32 ipaddr, int why)
 }
 
 int
-ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
+ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why)
 {
-       ksock_peer_t *peer = conn->ksnc_peer;
+       struct ksock_peer *peer = conn->ksnc_peer;
        __u32 ipaddr = conn->ksnc_ipaddr;
        int count;
 
@@ -1768,7 +1764,7 @@ ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why)
 int
 ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
 {
-       ksock_peer_t *peer;
+       struct ksock_peer *peer;
        struct list_head *ptmp;
        struct list_head *pnxt;
        int lo;
@@ -1789,7 +1785,7 @@ ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr)
        for (i = lo; i <= hi; i++) {
                list_for_each_safe(ptmp, pnxt,
                                   &ksocknal_data.ksnd_peers[i]) {
-                       peer = list_entry(ptmp, ksock_peer_t, ksnp_list);
+                       peer = list_entry(ptmp, struct ksock_peer, ksnp_list);
 
                        if (!((id.nid == LNET_NID_ANY || id.nid == peer->ksnp_id.nid) &&
                              (id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
@@ -1844,7 +1840,7 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
        int connect = 1;
        unsigned long last_alive = 0;
        unsigned long now = cfs_time_current();
-       ksock_peer_t *peer = NULL;
+       struct ksock_peer *peer = NULL;
        rwlock_t *glock = &ksocknal_data.ksnd_global_lock;
        lnet_process_id_t id = {
                .nid = nid,
@@ -1856,11 +1852,11 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
        peer = ksocknal_find_peer_locked(ni, id);
        if (peer) {
                struct list_head *tmp;
-               ksock_conn_t *conn;
+               struct ksock_conn *conn;
                int bufnob;
 
                list_for_each(tmp, &peer->ksnp_conns) {
-                       conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+                       conn = list_entry(tmp, struct ksock_conn, ksnc_list);
                        bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
 
                        if (bufnob < conn->ksnc_tx_bufnob) {
@@ -1902,12 +1898,12 @@ ksocknal_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
 }
 
 static void
-ksocknal_push_peer(ksock_peer_t *peer)
+ksocknal_push_peer(struct ksock_peer *peer)
 {
        int index;
        int i;
        struct list_head *tmp;
-       ksock_conn_t *conn;
+       struct ksock_conn *conn;
 
        for (index = 0; ; index++) {
                read_lock(&ksocknal_data.ksnd_global_lock);
@@ -1917,7 +1913,7 @@ ksocknal_push_peer(ksock_peer_t *peer)
 
                list_for_each(tmp, &peer->ksnp_conns) {
                        if (i++ == index) {
-                               conn = list_entry(tmp, ksock_conn_t,
+                               conn = list_entry(tmp, struct ksock_conn,
                                                  ksnc_list);
                                ksocknal_conn_addref(conn);
                                break;
@@ -1954,7 +1950,7 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
                int peer_off; /* searching offset in peer hash table */
 
                for (peer_off = 0; ; peer_off++) {
-                       ksock_peer_t *peer;
+                       struct ksock_peer *peer;
                        int i = 0;
 
                        read_lock(&ksocknal_data.ksnd_global_lock);
@@ -1986,15 +1982,15 @@ static int ksocknal_push(lnet_ni_t *ni, lnet_process_id_t id)
 static int
 ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
 {
-       ksock_net_t *net = ni->ni_data;
-       ksock_interface_t *iface;
+       struct ksock_net *net = ni->ni_data;
+       struct ksock_interface *iface;
        int rc;
        int i;
        int j;
        struct list_head *ptmp;
-       ksock_peer_t *peer;
+       struct ksock_peer *peer;
        struct list_head *rtmp;
-       ksock_route_t *route;
+       struct ksock_route *route;
 
        if (!ipaddress || !netmask)
                return -EINVAL;
@@ -2017,7 +2013,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
 
                for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                        list_for_each(ptmp, &ksocknal_data.ksnd_peers[i]) {
-                               peer = list_entry(ptmp, ksock_peer_t,
+                               peer = list_entry(ptmp, struct ksock_peer,
                                                  ksnp_list);
 
                                for (j = 0; j < peer->ksnp_n_passive_ips; j++)
@@ -2025,7 +2021,7 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
                                                iface->ksni_npeers++;
 
                                list_for_each(rtmp, &peer->ksnp_routes) {
-                                       route = list_entry(rtmp, ksock_route_t,
+                                       route = list_entry(rtmp, struct ksock_route,
                                                           ksnr_list);
 
                                        if (route->ksnr_myipaddr == ipaddress)
@@ -2044,12 +2040,12 @@ ksocknal_add_interface(lnet_ni_t *ni, __u32 ipaddress, __u32 netmask)
 }
 
 static void
-ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
+ksocknal_peer_del_interface_locked(struct ksock_peer *peer, __u32 ipaddr)
 {
        struct list_head *tmp;
        struct list_head *nxt;
-       ksock_route_t *route;
-       ksock_conn_t *conn;
+       struct ksock_route *route;
+       struct ksock_conn *conn;
        int i;
        int j;
 
@@ -2063,7 +2059,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
                }
 
        list_for_each_safe(tmp, nxt, &peer->ksnp_routes) {
-               route = list_entry(tmp, ksock_route_t, ksnr_list);
+               route = list_entry(tmp, struct ksock_route, ksnr_list);
 
                if (route->ksnr_myipaddr != ipaddr)
                        continue;
@@ -2077,7 +2073,7 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
        }
 
        list_for_each_safe(tmp, nxt, &peer->ksnp_conns) {
-               conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+               conn = list_entry(tmp, struct ksock_conn, ksnc_list);
 
                if (conn->ksnc_myipaddr == ipaddr)
                        ksocknal_close_conn_locked(conn, 0);
@@ -2087,11 +2083,11 @@ ksocknal_peer_del_interface_locked(ksock_peer_t *peer, __u32 ipaddr)
 static int
 ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
 {
-       ksock_net_t *net = ni->ni_data;
+       struct ksock_net *net = ni->ni_data;
        int rc = -ENOENT;
        struct list_head *tmp;
        struct list_head *nxt;
-       ksock_peer_t *peer;
+       struct ksock_peer *peer;
        __u32 this_ip;
        int i;
        int j;
@@ -2115,7 +2111,7 @@ ksocknal_del_interface(lnet_ni_t *ni, __u32 ipaddress)
                for (j = 0; j < ksocknal_data.ksnd_peer_hash_size; j++) {
                        list_for_each_safe(tmp, nxt,
                                           &ksocknal_data.ksnd_peers[j]) {
-                               peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+                               peer = list_entry(tmp, struct ksock_peer, ksnp_list);
 
                                if (peer->ksnp_ni != ni)
                                        continue;
@@ -2139,8 +2135,8 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
 
        switch (cmd) {
        case IOC_LIBCFS_GET_INTERFACE: {
-               ksock_net_t       *net = ni->ni_data;
-               ksock_interface_t *iface;
+               struct ksock_net       *net = ni->ni_data;
+               struct ksock_interface *iface;
 
                read_lock(&ksocknal_data.ksnd_global_lock);
 
@@ -2209,7 +2205,7 @@ ksocknal_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
                int txmem;
                int rxmem;
                int nagle;
-               ksock_conn_t *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
+               struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
 
                if (!conn)
                        return -ENOENT;
@@ -2284,8 +2280,8 @@ ksocknal_free_buffers(void)
 
        if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                struct list_head zlist;
-               ksock_tx_t *tx;
-               ksock_tx_t *temp;
+               struct ksock_tx *tx;
+               struct ksock_tx *temp;
 
                list_add(&zlist, &ksocknal_data.ksnd_idle_noop_txs);
                list_del_init(&ksocknal_data.ksnd_idle_noop_txs);
@@ -2304,7 +2300,7 @@ static void
 ksocknal_base_shutdown(void)
 {
        struct ksock_sched_info *info;
-       ksock_sched_t *sched;
+       struct ksock_sched *sched;
        int i;
        int j;
 
@@ -2446,7 +2442,7 @@ ksocknal_base_startup(void)
                goto failed;
 
        cfs_percpt_for_each(info, i, ksocknal_data.ksnd_sched_info) {
-               ksock_sched_t *sched;
+               struct ksock_sched *sched;
                int nthrs;
 
                nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
@@ -2534,7 +2530,7 @@ ksocknal_base_startup(void)
 static void
 ksocknal_debug_peerhash(lnet_ni_t *ni)
 {
-       ksock_peer_t *peer = NULL;
+       struct ksock_peer *peer = NULL;
        struct list_head *tmp;
        int i;
 
@@ -2542,7 +2538,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
 
        for (i = 0; i < ksocknal_data.ksnd_peer_hash_size; i++) {
                list_for_each(tmp, &ksocknal_data.ksnd_peers[i]) {
-                       peer = list_entry(tmp, ksock_peer_t, ksnp_list);
+                       peer = list_entry(tmp, struct ksock_peer, ksnp_list);
 
                        if (peer->ksnp_ni == ni)
                                break;
@@ -2552,8 +2548,8 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
        }
 
        if (peer) {
-               ksock_route_t *route;
-               ksock_conn_t  *conn;
+               struct ksock_route *route;
+               struct ksock_conn  *conn;
 
                CWARN("Active peer on shutdown: %s, ref %d, scnt %d, closing %d, accepting %d, err %d, zcookie %llu, txq %d, zc_req %d\n",
                      libcfs_id2str(peer->ksnp_id),
@@ -2565,7 +2561,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
                      !list_empty(&peer->ksnp_zc_req_list));
 
                list_for_each(tmp, &peer->ksnp_routes) {
-                       route = list_entry(tmp, ksock_route_t, ksnr_list);
+                       route = list_entry(tmp, struct ksock_route, ksnr_list);
                        CWARN("Route: ref %d, schd %d, conn %d, cnted %d, del %d\n",
                              atomic_read(&route->ksnr_refcount),
                              route->ksnr_scheduled, route->ksnr_connecting,
@@ -2573,7 +2569,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
                }
 
                list_for_each(tmp, &peer->ksnp_conns) {
-                       conn = list_entry(tmp, ksock_conn_t, ksnc_list);
+                       conn = list_entry(tmp, struct ksock_conn, ksnc_list);
                        CWARN("Conn: ref %d, sref %d, t %d, c %d\n",
                              atomic_read(&conn->ksnc_conn_refcount),
                              atomic_read(&conn->ksnc_sock_refcount),
@@ -2587,7 +2583,7 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
 void
 ksocknal_shutdown(lnet_ni_t *ni)
 {
-       ksock_net_t *net = ni->ni_data;
+       struct ksock_net *net = ni->ni_data;
        int i;
        lnet_process_id_t anyid = {0};
 
@@ -2637,7 +2633,7 @@ ksocknal_shutdown(lnet_ni_t *ni)
 }
 
 static int
-ksocknal_enumerate_interfaces(ksock_net_t *net)
+ksocknal_enumerate_interfaces(struct ksock_net *net)
 {
        char **names;
        int i;
@@ -2694,7 +2690,7 @@ ksocknal_enumerate_interfaces(ksock_net_t *net)
 }
 
 static int
-ksocknal_search_new_ipif(ksock_net_t *net)
+ksocknal_search_new_ipif(struct ksock_net *net)
 {
        int new_ipif = 0;
        int i;
@@ -2703,7 +2699,7 @@ ksocknal_search_new_ipif(ksock_net_t *net)
                char *ifnam = &net->ksnn_interfaces[i].ksni_name[0];
                char *colon = strchr(ifnam, ':');
                int found  = 0;
-               ksock_net_t *tmp;
+               struct ksock_net *tmp;
                int j;
 
                if (colon) /* ignore alias device */
@@ -2760,7 +2756,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
        for (i = 0; i < nthrs; i++) {
                long id;
                char name[20];
-               ksock_sched_t *sched;
+               struct ksock_sched *sched;
 
                id = KSOCK_THREAD_ID(info->ksi_cpt, info->ksi_nthreads + i);
                sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
@@ -2782,7 +2778,7 @@ ksocknal_start_schedulers(struct ksock_sched_info *info)
 }
 
 static int
-ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
+ksocknal_net_start_threads(struct ksock_net *net, __u32 *cpts, int ncpts)
 {
        int newif = ksocknal_search_new_ipif(net);
        int rc;
@@ -2810,7 +2806,7 @@ ksocknal_net_start_threads(ksock_net_t *net, __u32 *cpts, int ncpts)
 int
 ksocknal_startup(lnet_ni_t *ni)
 {
-       ksock_net_t *net;
+       struct ksock_net *net;
        int rc;
        int i;
 
index a60d72f9432f04afb2cff568193c68c11b3971c9..a56632b4ee37f67a80452829e3f2d007a9d07be8 100644 (file)
@@ -77,8 +77,7 @@
 
 struct ksock_sched_info;
 
-typedef struct                           /* per scheduler state */
-{
+struct ksock_sched {                           /* per scheduler state */
        spinlock_t              kss_lock;       /* serialise */
        struct list_head        kss_rx_conns;   /* conn waiting to be read */
        struct list_head        kss_tx_conns;   /* conn waiting to be written */
@@ -89,13 +88,13 @@ typedef struct                                /* per scheduler state */
        struct ksock_sched_info *kss_info;      /* owner of it */
        struct page             *kss_rx_scratch_pgs[LNET_MAX_IOV];
        struct kvec             kss_scratch_iov[LNET_MAX_IOV];
-} ksock_sched_t;
+};
 
 struct ksock_sched_info {
        int                     ksi_nthreads_max; /* max allowed threads */
        int                     ksi_nthreads;     /* number of threads */
        int                     ksi_cpt;          /* CPT id */
-       ksock_sched_t           *ksi_scheds;      /* array of schedulers */
+       struct ksock_sched      *ksi_scheds;      /* array of schedulers */
 };
 
 #define KSOCK_CPT_SHIFT           16
@@ -103,16 +102,15 @@ struct ksock_sched_info {
 #define KSOCK_THREAD_CPT(id)      ((id) >> KSOCK_CPT_SHIFT)
 #define KSOCK_THREAD_SID(id)      ((id) & ((1UL << KSOCK_CPT_SHIFT) - 1))
 
-typedef struct                                  /* in-use interface */
-{
+struct ksock_interface {                       /* in-use interface */
        __u32           ksni_ipaddr;            /* interface's IP address */
        __u32           ksni_netmask;           /* interface's network mask */
        int             ksni_nroutes;           /* # routes using (active) */
        int             ksni_npeers;            /* # peers using (passive) */
        char            ksni_name[IFNAMSIZ];    /* interface name */
-} ksock_interface_t;
+};
 
-typedef struct {
+struct ksock_tunables {
        int          *ksnd_timeout;            /* "stuck" socket timeout
                                                * (seconds) */
        int          *ksnd_nscheds;            /* # scheduler threads in each
@@ -155,24 +153,24 @@ typedef struct {
                                                * Chelsio TOE) */
        int          *ksnd_zc_recv_min_nfrags; /* minimum # of fragments to
                                                * enable ZC receive */
-} ksock_tunables_t;
+};
 
-typedef struct {
+struct ksock_net {
        __u64             ksnn_incarnation;     /* my epoch */
        spinlock_t        ksnn_lock;            /* serialise */
        struct list_head          ksnn_list;            /* chain on global list */
        int               ksnn_npeers;          /* # peers */
        int               ksnn_shutdown;        /* shutting down? */
        int               ksnn_ninterfaces;     /* IP interfaces */
-       ksock_interface_t ksnn_interfaces[LNET_MAX_INTERFACES];
-} ksock_net_t;
+       struct ksock_interface ksnn_interfaces[LNET_MAX_INTERFACES];
+};
 
 /** connd timeout */
 #define SOCKNAL_CONND_TIMEOUT  120
 /** reserved thread for accepting & creating new connd */
 #define SOCKNAL_CONND_RESV     1
 
-typedef struct {
+struct ksock_nal_data {
        int                     ksnd_init;              /* initialisation state
                                                         */
        int                     ksnd_nnets;             /* # networks set up */
@@ -229,7 +227,7 @@ typedef struct {
        spinlock_t              ksnd_tx_lock;           /* serialise, g_lock
                                                         * unsafe */
 
-} ksock_nal_data_t;
+};
 
 #define SOCKNAL_INIT_NOTHING 0
 #define SOCKNAL_INIT_DATA    1
@@ -250,8 +248,7 @@ struct ksock_peer;  /* forward ref */
 struct ksock_route; /* forward ref */
 struct ksock_proto; /* forward ref */
 
-typedef struct                             /* transmit packet */
-{
+struct ksock_tx {                         /* transmit packet */
        struct list_head  tx_list;         /* queue on conn for transmission etc
                                            */
        struct list_head  tx_zc_list;      /* queue on peer for ZC request */
@@ -281,20 +278,20 @@ typedef struct                             /* transmit packet */
                        struct kvec iov[1];  /* virt hdr + payload */
                } virt;
        } tx_frags;
-} ksock_tx_t;
+};
 
-#define KSOCK_NOOP_TX_SIZE (offsetof(ksock_tx_t, tx_frags.paged.kiov[0]))
+#define KSOCK_NOOP_TX_SIZE (offsetof(struct ksock_tx, tx_frags.paged.kiov[0]))
 
-/* network zero copy callback descriptor embedded in ksock_tx_t */
+/* network zero copy callback descriptor embedded in struct ksock_tx */
 
 /*
  * space for the rx frag descriptors; we either read a single contiguous
  * header, or up to LNET_MAX_IOV frags of payload of either type.
  */
-typedef union {
+union ksock_rxiovspace {
        struct kvec      iov[LNET_MAX_IOV];
        lnet_kiov_t      kiov[LNET_MAX_IOV];
-} ksock_rxiovspace_t;
+};
 
 #define SOCKNAL_RX_KSM_HEADER   1 /* reading ksock message header */
 #define SOCKNAL_RX_LNET_HEADER  2 /* reading lnet message header */
@@ -303,7 +300,7 @@ typedef union {
 #define SOCKNAL_RX_LNET_PAYLOAD 5 /* reading lnet payload (to deliver here) */
 #define SOCKNAL_RX_SLOP         6 /* skipping body */
 
-typedef struct ksock_conn {
+struct ksock_conn {
        struct ksock_peer  *ksnc_peer;        /* owning peer */
        struct ksock_route *ksnc_route;       /* owning route */
        struct list_head   ksnc_list;         /* stash on peer's conn list */
@@ -314,8 +311,8 @@ typedef struct ksock_conn {
                                                     * write_space() callback */
        atomic_t           ksnc_conn_refcount;/* conn refcount */
        atomic_t           ksnc_sock_refcount;/* sock refcount */
-       ksock_sched_t      *ksnc_scheduler;   /* who schedules this connection
-                                              */
+       struct ksock_sched *ksnc_scheduler;     /* who schedules this connection
+                                                */
        __u32              ksnc_myipaddr;     /* my IP */
        __u32              ksnc_ipaddr;       /* peer's IP */
        int                ksnc_port;         /* peer's port */
@@ -341,7 +338,7 @@ typedef struct ksock_conn {
        struct kvec        *ksnc_rx_iov;      /* the iovec frags */
        int                ksnc_rx_nkiov;     /* # page frags */
        lnet_kiov_t        *ksnc_rx_kiov;     /* the page frags */
-       ksock_rxiovspace_t ksnc_rx_iov_space; /* space for frag descriptors */
+       union ksock_rxiovspace ksnc_rx_iov_space; /* space for frag descriptors */
        __u32              ksnc_rx_csum;      /* partial checksum for incoming
                                               * data */
        void               *ksnc_cookie;      /* rx lnet_finalize passthru arg
@@ -357,7 +354,7 @@ typedef struct ksock_conn {
        struct list_head   ksnc_tx_list;      /* where I enq waiting for output
                                               * space */
        struct list_head   ksnc_tx_queue;     /* packets waiting to be sent */
-       ksock_tx_t         *ksnc_tx_carrier;  /* next TX that can carry a LNet
+       struct ksock_tx         *ksnc_tx_carrier;  /* next TX that can carry a LNet
                                               * message or ZC-ACK */
        unsigned long      ksnc_tx_deadline;  /* when (in jiffies) tx times out
                                               */
@@ -367,9 +364,9 @@ typedef struct ksock_conn {
        int                ksnc_tx_scheduled; /* being progressed */
        unsigned long      ksnc_tx_last_post; /* time stamp of the last posted
                                               * TX */
-} ksock_conn_t;
+};
 
-typedef struct ksock_route {
+struct ksock_route {
        struct list_head  ksnr_list;           /* chain on peer route list */
        struct list_head  ksnr_connd_list;     /* chain on ksnr_connd_routes */
        struct ksock_peer *ksnr_peer;          /* owning peer */
@@ -389,11 +386,11 @@ typedef struct ksock_route {
        unsigned int      ksnr_share_count;    /* created explicitly? */
        int               ksnr_conn_count;     /* # conns established by this
                                                * route */
-} ksock_route_t;
+};
 
 #define SOCKNAL_KEEPALIVE_PING 1 /* cookie for keepalive ping */
 
-typedef struct ksock_peer {
+struct ksock_peer {
        struct list_head   ksnp_list;           /* stash on global peer list */
        unsigned long      ksnp_last_alive;     /* when (in jiffies) I was last
                                                 * alive */
@@ -420,49 +417,49 @@ typedef struct ksock_peer {
 
        /* preferred local interfaces */
        __u32              ksnp_passive_ips[LNET_MAX_INTERFACES];
-} ksock_peer_t;
+};
 
-typedef struct ksock_connreq {
+struct ksock_connreq {
        struct list_head ksncr_list;  /* stash on ksnd_connd_connreqs */
        lnet_ni_t        *ksncr_ni;   /* chosen NI */
        struct socket    *ksncr_sock; /* accepted socket */
-} ksock_connreq_t;
+};
 
-extern ksock_nal_data_t ksocknal_data;
-extern ksock_tunables_t ksocknal_tunables;
+extern struct ksock_nal_data ksocknal_data;
+extern struct ksock_tunables ksocknal_tunables;
 
 #define SOCKNAL_MATCH_NO  0 /* TX can't match type of connection */
 #define SOCKNAL_MATCH_YES 1 /* TX matches type of connection */
 #define SOCKNAL_MATCH_MAY 2 /* TX can be sent on the connection, but not
                             * preferred */
 
-typedef struct ksock_proto {
+struct ksock_proto {
        /* version number of protocol */
        int        pro_version;
 
        /* handshake function */
-       int        (*pro_send_hello)(ksock_conn_t *, ksock_hello_msg_t *);
+       int        (*pro_send_hello)(struct ksock_conn *, ksock_hello_msg_t *);
 
        /* handshake function */
-       int        (*pro_recv_hello)(ksock_conn_t *, ksock_hello_msg_t *, int);
+       int        (*pro_recv_hello)(struct ksock_conn *, ksock_hello_msg_t *, int);
 
        /* message pack */
-       void       (*pro_pack)(ksock_tx_t *);
+       void       (*pro_pack)(struct ksock_tx *);
 
        /* message unpack */
        void       (*pro_unpack)(ksock_msg_t *);
 
        /* queue tx on the connection */
-       ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *);
+       struct ksock_tx *(*pro_queue_tx_msg)(struct ksock_conn *, struct ksock_tx *);
 
        /* queue ZC ack on the connection */
-       int        (*pro_queue_tx_zcack)(ksock_conn_t *, ksock_tx_t *, __u64);
+       int        (*pro_queue_tx_zcack)(struct ksock_conn *, struct ksock_tx *, __u64);
 
        /* handle ZC request */
-       int        (*pro_handle_zcreq)(ksock_conn_t *, __u64, int);
+       int        (*pro_handle_zcreq)(struct ksock_conn *, __u64, int);
 
        /* handle ZC ACK */
-       int        (*pro_handle_zcack)(ksock_conn_t *, __u64, __u64);
+       int        (*pro_handle_zcack)(struct ksock_conn *, __u64, __u64);
 
        /*
         * msg type matches the connection type:
@@ -471,12 +468,12 @@ typedef struct ksock_proto {
         *   return MATCH_YES : matching type
         *   return MATCH_MAY : can be backup
         */
-       int        (*pro_match_tx)(ksock_conn_t *, ksock_tx_t *, int);
-} ksock_proto_t;
+       int        (*pro_match_tx)(struct ksock_conn *, struct ksock_tx *, int);
+};
 
-extern ksock_proto_t ksocknal_protocol_v1x;
-extern ksock_proto_t ksocknal_protocol_v2x;
-extern ksock_proto_t ksocknal_protocol_v3x;
+extern struct ksock_proto ksocknal_protocol_v1x;
+extern struct ksock_proto ksocknal_protocol_v2x;
+extern struct ksock_proto ksocknal_protocol_v3x;
 
 #define KSOCK_PROTO_V1_MAJOR LNET_PROTO_TCP_VERSION_MAJOR
 #define KSOCK_PROTO_V1_MINOR LNET_PROTO_TCP_VERSION_MINOR
@@ -517,17 +514,17 @@ ksocknal_nid2peerlist(lnet_nid_t nid)
 }
 
 static inline void
-ksocknal_conn_addref(ksock_conn_t *conn)
+ksocknal_conn_addref(struct ksock_conn *conn)
 {
        LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
        atomic_inc(&conn->ksnc_conn_refcount);
 }
 
-void ksocknal_queue_zombie_conn(ksock_conn_t *conn);
-void ksocknal_finalize_zcreq(ksock_conn_t *conn);
+void ksocknal_queue_zombie_conn(struct ksock_conn *conn);
+void ksocknal_finalize_zcreq(struct ksock_conn *conn);
 
 static inline void
-ksocknal_conn_decref(ksock_conn_t *conn)
+ksocknal_conn_decref(struct ksock_conn *conn)
 {
        LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
        if (atomic_dec_and_test(&conn->ksnc_conn_refcount))
@@ -535,7 +532,7 @@ ksocknal_conn_decref(ksock_conn_t *conn)
 }
 
 static inline int
-ksocknal_connsock_addref(ksock_conn_t *conn)
+ksocknal_connsock_addref(struct ksock_conn *conn)
 {
        int rc = -ESHUTDOWN;
 
@@ -551,7 +548,7 @@ ksocknal_connsock_addref(ksock_conn_t *conn)
 }
 
 static inline void
-ksocknal_connsock_decref(ksock_conn_t *conn)
+ksocknal_connsock_decref(struct ksock_conn *conn)
 {
        LASSERT(atomic_read(&conn->ksnc_sock_refcount) > 0);
        if (atomic_dec_and_test(&conn->ksnc_sock_refcount)) {
@@ -563,17 +560,17 @@ ksocknal_connsock_decref(ksock_conn_t *conn)
 }
 
 static inline void
-ksocknal_tx_addref(ksock_tx_t *tx)
+ksocknal_tx_addref(struct ksock_tx *tx)
 {
        LASSERT(atomic_read(&tx->tx_refcount) > 0);
        atomic_inc(&tx->tx_refcount);
 }
 
-void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx);
-void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx);
+void ksocknal_tx_prep(struct ksock_conn *, struct ksock_tx *tx);
+void ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx);
 
 static inline void
-ksocknal_tx_decref(ksock_tx_t *tx)
+ksocknal_tx_decref(struct ksock_tx *tx)
 {
        LASSERT(atomic_read(&tx->tx_refcount) > 0);
        if (atomic_dec_and_test(&tx->tx_refcount))
@@ -581,16 +578,16 @@ ksocknal_tx_decref(ksock_tx_t *tx)
 }
 
 static inline void
-ksocknal_route_addref(ksock_route_t *route)
+ksocknal_route_addref(struct ksock_route *route)
 {
        LASSERT(atomic_read(&route->ksnr_refcount) > 0);
        atomic_inc(&route->ksnr_refcount);
 }
 
-void ksocknal_destroy_route(ksock_route_t *route);
+void ksocknal_destroy_route(struct ksock_route *route);
 
 static inline void
-ksocknal_route_decref(ksock_route_t *route)
+ksocknal_route_decref(struct ksock_route *route)
 {
        LASSERT(atomic_read(&route->ksnr_refcount) > 0);
        if (atomic_dec_and_test(&route->ksnr_refcount))
@@ -598,16 +595,16 @@ ksocknal_route_decref(ksock_route_t *route)
 }
 
 static inline void
-ksocknal_peer_addref(ksock_peer_t *peer)
+ksocknal_peer_addref(struct ksock_peer *peer)
 {
        LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
        atomic_inc(&peer->ksnp_refcount);
 }
 
-void ksocknal_destroy_peer(ksock_peer_t *peer);
+void ksocknal_destroy_peer(struct ksock_peer *peer);
 
 static inline void
-ksocknal_peer_decref(ksock_peer_t *peer)
+ksocknal_peer_decref(struct ksock_peer *peer)
 {
        LASSERT(atomic_read(&peer->ksnp_refcount) > 0);
        if (atomic_dec_and_test(&peer->ksnp_refcount))
@@ -625,71 +622,71 @@ int ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg,
 int ksocknal_accept(lnet_ni_t *ni, struct socket *sock);
 
 int ksocknal_add_peer(lnet_ni_t *ni, lnet_process_id_t id, __u32 ip, int port);
-ksock_peer_t *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
-ksock_peer_t *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
-void ksocknal_peer_failed(ksock_peer_t *peer);
-int ksocknal_create_conn(lnet_ni_t *ni, ksock_route_t *route,
+struct ksock_peer *ksocknal_find_peer_locked(lnet_ni_t *ni, lnet_process_id_t id);
+struct ksock_peer *ksocknal_find_peer(lnet_ni_t *ni, lnet_process_id_t id);
+void ksocknal_peer_failed(struct ksock_peer *peer);
+int ksocknal_create_conn(lnet_ni_t *ni, struct ksock_route *route,
                         struct socket *sock, int type);
-void ksocknal_close_conn_locked(ksock_conn_t *conn, int why);
-void ksocknal_terminate_conn(ksock_conn_t *conn);
-void ksocknal_destroy_conn(ksock_conn_t *conn);
-int  ksocknal_close_peer_conns_locked(ksock_peer_t *peer,
+void ksocknal_close_conn_locked(struct ksock_conn *conn, int why);
+void ksocknal_terminate_conn(struct ksock_conn *conn);
+void ksocknal_destroy_conn(struct ksock_conn *conn);
+int  ksocknal_close_peer_conns_locked(struct ksock_peer *peer,
                                      __u32 ipaddr, int why);
-int ksocknal_close_conn_and_siblings(ksock_conn_t *conn, int why);
+int ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why);
 int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
-ksock_conn_t *ksocknal_find_conn_locked(ksock_peer_t *peer,
-                                       ksock_tx_t *tx, int nonblk);
+struct ksock_conn *ksocknal_find_conn_locked(struct ksock_peer *peer,
+                                       struct ksock_tx *tx, int nonblk);
 
-int  ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx,
+int  ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx,
                            lnet_process_id_t id);
-ksock_tx_t *ksocknal_alloc_tx(int type, int size);
-void ksocknal_free_tx(ksock_tx_t *tx);
-ksock_tx_t *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
-void ksocknal_next_tx_carrier(ksock_conn_t *conn);
-void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn);
+struct ksock_tx *ksocknal_alloc_tx(int type, int size);
+void ksocknal_free_tx(struct ksock_tx *tx);
+struct ksock_tx *ksocknal_alloc_tx_noop(__u64 cookie, int nonblk);
+void ksocknal_next_tx_carrier(struct ksock_conn *conn);
+void ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn);
 void ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error);
 void ksocknal_notify(lnet_ni_t *ni, lnet_nid_t gw_nid, int alive);
 void ksocknal_query(struct lnet_ni *ni, lnet_nid_t nid, unsigned long *when);
 int ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name);
 void ksocknal_thread_fini(void);
-void ksocknal_launch_all_connections_locked(ksock_peer_t *peer);
-ksock_route_t *ksocknal_find_connectable_route_locked(ksock_peer_t *peer);
-ksock_route_t *ksocknal_find_connecting_route_locked(ksock_peer_t *peer);
-int ksocknal_new_packet(ksock_conn_t *conn, int skip);
+void ksocknal_launch_all_connections_locked(struct ksock_peer *peer);
+struct ksock_route *ksocknal_find_connectable_route_locked(struct ksock_peer *peer);
+struct ksock_route *ksocknal_find_connecting_route_locked(struct ksock_peer *peer);
+int ksocknal_new_packet(struct ksock_conn *conn, int skip);
 int ksocknal_scheduler(void *arg);
 int ksocknal_connd(void *arg);
 int ksocknal_reaper(void *arg);
-int ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+int ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn,
                        lnet_nid_t peer_nid, ksock_hello_msg_t *hello);
-int ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+int ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
                        ksock_hello_msg_t *hello, lnet_process_id_t *id,
                        __u64 *incarnation);
-void ksocknal_read_callback(ksock_conn_t *conn);
-void ksocknal_write_callback(ksock_conn_t *conn);
-
-int ksocknal_lib_zc_capable(ksock_conn_t *conn);
-void ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_set_callback(struct socket *sock,  ksock_conn_t *conn);
-void ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn);
-void ksocknal_lib_push_conn(ksock_conn_t *conn);
-int ksocknal_lib_get_conn_addrs(ksock_conn_t *conn);
+void ksocknal_read_callback(struct ksock_conn *conn);
+void ksocknal_write_callback(struct ksock_conn *conn);
+
+int ksocknal_lib_zc_capable(struct ksock_conn *conn);
+void ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn);
+void ksocknal_lib_set_callback(struct socket *sock,  struct ksock_conn *conn);
+void ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn);
+void ksocknal_lib_push_conn(struct ksock_conn *conn);
+int ksocknal_lib_get_conn_addrs(struct ksock_conn *conn);
 int ksocknal_lib_setup_sock(struct socket *so);
-int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx);
-int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx);
-void ksocknal_lib_eager_ack(ksock_conn_t *conn);
-int ksocknal_lib_recv_iov(ksock_conn_t *conn);
-int ksocknal_lib_recv_kiov(ksock_conn_t *conn);
-int ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem,
+int ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx);
+int ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx);
+void ksocknal_lib_eager_ack(struct ksock_conn *conn);
+int ksocknal_lib_recv_iov(struct ksock_conn *conn);
+int ksocknal_lib_recv_kiov(struct ksock_conn *conn);
+int ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
                                   int *rxmem, int *nagle);
 
-void ksocknal_read_callback(ksock_conn_t *conn);
-void ksocknal_write_callback(ksock_conn_t *conn);
+void ksocknal_read_callback(struct ksock_conn *conn);
+void ksocknal_write_callback(struct ksock_conn *conn);
 
 int ksocknal_tunables_init(void);
 
-void ksocknal_lib_csum_tx(ksock_tx_t *tx);
+void ksocknal_lib_csum_tx(struct ksock_tx *tx);
 
-int ksocknal_lib_memory_pressure(ksock_conn_t *conn);
+int ksocknal_lib_memory_pressure(struct ksock_conn *conn);
 int ksocknal_lib_bind_thread_to_cpu(int id);
 
 #endif /* _SOCKLND_SOCKLND_H_ */
index 976fd78926e046d265ed68424262a39a164d27f1..303576d815c64adadf7b3a00355f2c43e5721251 100644 (file)
 
 #include "socklnd.h"
 
-ksock_tx_t *
+struct ksock_tx *
 ksocknal_alloc_tx(int type, int size)
 {
-       ksock_tx_t *tx = NULL;
+       struct ksock_tx *tx = NULL;
 
        if (type == KSOCK_MSG_NOOP) {
                LASSERT(size == KSOCK_NOOP_TX_SIZE);
@@ -36,7 +36,7 @@ ksocknal_alloc_tx(int type, int size)
 
                if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
                        tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
-                                           next, ksock_tx_t, tx_list);
+                                           next, struct ksock_tx, tx_list);
                        LASSERT(tx->tx_desc_size == size);
                        list_del(&tx->tx_list);
                }
@@ -61,10 +61,10 @@ ksocknal_alloc_tx(int type, int size)
        return tx;
 }
 
-ksock_tx_t *
+struct ksock_tx *
 ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
 {
-       ksock_tx_t *tx;
+       struct ksock_tx *tx;
 
        tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
        if (!tx) {
@@ -87,7 +87,7 @@ ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
 }
 
 void
-ksocknal_free_tx(ksock_tx_t *tx)
+ksocknal_free_tx(struct ksock_tx *tx)
 {
        atomic_dec(&ksocknal_data.ksnd_nactive_txs);
 
@@ -104,7 +104,7 @@ ksocknal_free_tx(ksock_tx_t *tx)
 }
 
 static int
-ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
 {
        struct kvec *iov = tx->tx_iov;
        int nob;
@@ -126,7 +126,7 @@ ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
        do {
                LASSERT(tx->tx_niov > 0);
 
-               if (nob < (int) iov->iov_len) {
+               if (nob < (int)iov->iov_len) {
                        iov->iov_base = (void *)((char *)iov->iov_base + nob);
                        iov->iov_len -= nob;
                        return rc;
@@ -141,7 +141,7 @@ ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 static int
-ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
 {
        lnet_kiov_t *kiov = tx->tx_kiov;
        int nob;
@@ -179,7 +179,7 @@ ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 static int
-ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
 {
        int rc;
        int bufnob;
@@ -247,7 +247,7 @@ ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 static int
-ksocknal_recv_iov(ksock_conn_t *conn)
+ksocknal_recv_iov(struct ksock_conn *conn)
 {
        struct kvec *iov = conn->ksnc_rx_iov;
        int nob;
@@ -294,7 +294,7 @@ ksocknal_recv_iov(ksock_conn_t *conn)
 }
 
 static int
-ksocknal_recv_kiov(ksock_conn_t *conn)
+ksocknal_recv_kiov(struct ksock_conn *conn)
 {
        lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
        int nob;
@@ -326,7 +326,7 @@ ksocknal_recv_kiov(ksock_conn_t *conn)
        do {
                LASSERT(conn->ksnc_rx_nkiov > 0);
 
-               if (nob < (int) kiov->kiov_len) {
+               if (nob < (int)kiov->kiov_len) {
                        kiov->kiov_offset += nob;
                        kiov->kiov_len -= nob;
                        return -EAGAIN;
@@ -341,7 +341,7 @@ ksocknal_recv_kiov(ksock_conn_t *conn)
 }
 
 static int
-ksocknal_receive(ksock_conn_t *conn)
+ksocknal_receive(struct ksock_conn *conn)
 {
        /*
         * Return 1 on success, 0 on EOF, < 0 on error.
@@ -391,7 +391,7 @@ ksocknal_receive(ksock_conn_t *conn)
 }
 
 void
-ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
+ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx)
 {
        lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
        int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;
@@ -412,10 +412,10 @@ ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)
 void
 ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
 {
-       ksock_tx_t *tx;
+       struct ksock_tx *tx;
 
        while (!list_empty(txlist)) {
-               tx = list_entry(txlist->next, ksock_tx_t, tx_list);
+               tx = list_entry(txlist->next, struct ksock_tx, tx_list);
 
                if (error && tx->tx_lnetmsg) {
                        CNETERR("Deleting packet type %d len %d %s->%s\n",
@@ -435,10 +435,10 @@ ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
 }
 
 static void
-ksocknal_check_zc_req(ksock_tx_t *tx)
+ksocknal_check_zc_req(struct ksock_tx *tx)
 {
-       ksock_conn_t *conn = tx->tx_conn;
-       ksock_peer_t *peer = conn->ksnc_peer;
+       struct ksock_conn *conn = tx->tx_conn;
+       struct ksock_peer *peer = conn->ksnc_peer;
 
        /*
         * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
@@ -482,9 +482,9 @@ ksocknal_check_zc_req(ksock_tx_t *tx)
 }
 
 static void
-ksocknal_uncheck_zc_req(ksock_tx_t *tx)
+ksocknal_uncheck_zc_req(struct ksock_tx *tx)
 {
-       ksock_peer_t *peer = tx->tx_conn->ksnc_peer;
+       struct ksock_peer *peer = tx->tx_conn->ksnc_peer;
 
        LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
        LASSERT(tx->tx_zc_capable);
@@ -508,7 +508,7 @@ ksocknal_uncheck_zc_req(ksock_tx_t *tx)
 }
 
 static int
-ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
 {
        int rc;
 
@@ -583,7 +583,7 @@ ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 static void
-ksocknal_launch_connection_locked(ksock_route_t *route)
+ksocknal_launch_connection_locked(struct ksock_route *route)
 {
        /* called holding write lock on ksnd_global_lock */
 
@@ -604,9 +604,9 @@ ksocknal_launch_connection_locked(ksock_route_t *route)
 }
 
 void
-ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
+ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
 {
-       ksock_route_t *route;
+       struct ksock_route *route;
 
        /* called holding write lock on ksnd_global_lock */
        for (;;) {
@@ -619,18 +619,18 @@ ksocknal_launch_all_connections_locked(ksock_peer_t *peer)
        }
 }
 
-ksock_conn_t *
-ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
+struct ksock_conn *
+ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk)
 {
        struct list_head *tmp;
-       ksock_conn_t *conn;
-       ksock_conn_t *typed = NULL;
-       ksock_conn_t *fallback = NULL;
+       struct ksock_conn *conn;
+       struct ksock_conn *typed = NULL;
+       struct ksock_conn *fallback = NULL;
        int tnob = 0;
        int fnob = 0;
 
        list_for_each(tmp, &peer->ksnp_conns) {
-               ksock_conn_t *c  = list_entry(tmp, ksock_conn_t, ksnc_list);
+               struct ksock_conn *c  = list_entry(tmp, struct ksock_conn, ksnc_list);
                int nob = atomic_read(&c->ksnc_tx_nob) +
                        c->ksnc_sock->sk->sk_wmem_queued;
                int rc;
@@ -677,7 +677,7 @@ ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)
 }
 
 void
-ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
 {
        conn->ksnc_proto->pro_pack(tx);
 
@@ -687,11 +687,11 @@ ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 void
-ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
+ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
 {
-       ksock_sched_t *sched = conn->ksnc_scheduler;
+       struct ksock_sched *sched = conn->ksnc_scheduler;
        ksock_msg_t *msg = &tx->tx_msg;
-       ksock_tx_t *ztx = NULL;
+       struct ksock_tx *ztx = NULL;
        int bufnob = 0;
 
        /*
@@ -784,15 +784,15 @@ ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)
        spin_unlock_bh(&sched->kss_lock);
 }
 
-ksock_route_t *
-ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
+struct ksock_route *
+ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
 {
        unsigned long now = cfs_time_current();
        struct list_head *tmp;
-       ksock_route_t *route;
+       struct ksock_route *route;
 
        list_for_each(tmp, &peer->ksnp_routes) {
-               route = list_entry(tmp, ksock_route_t, ksnr_list);
+               route = list_entry(tmp, struct ksock_route, ksnr_list);
 
                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
@@ -820,14 +820,14 @@ ksocknal_find_connectable_route_locked(ksock_peer_t *peer)
        return NULL;
 }
 
-ksock_route_t *
-ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
+struct ksock_route *
+ksocknal_find_connecting_route_locked(struct ksock_peer *peer)
 {
        struct list_head *tmp;
-       ksock_route_t *route;
+       struct ksock_route *route;
 
        list_for_each(tmp, &peer->ksnp_routes) {
-               route = list_entry(tmp, ksock_route_t, ksnr_list);
+               route = list_entry(tmp, struct ksock_route, ksnr_list);
 
                LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
 
@@ -839,10 +839,10 @@ ksocknal_find_connecting_route_locked(ksock_peer_t *peer)
 }
 
 int
-ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
+ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx, lnet_process_id_t id)
 {
-       ksock_peer_t *peer;
-       ksock_conn_t *conn;
+       struct ksock_peer *peer;
+       struct ksock_conn *conn;
        rwlock_t *g_lock;
        int retry;
        int rc;
@@ -942,7 +942,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
        lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
        unsigned int payload_offset = lntmsg->msg_offset;
        unsigned int payload_nob = lntmsg->msg_len;
-       ksock_tx_t *tx;
+       struct ksock_tx *tx;
        int desc_size;
        int rc;
 
@@ -960,10 +960,10 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
        LASSERT(!in_interrupt());
 
        if (payload_iov)
-               desc_size = offsetof(ksock_tx_t,
+               desc_size = offsetof(struct ksock_tx,
                                     tx_frags.virt.iov[1 + payload_niov]);
        else
-               desc_size = offsetof(ksock_tx_t,
+               desc_size = offsetof(struct ksock_tx,
                                     tx_frags.paged.kiov[payload_niov]);
 
        if (lntmsg->msg_vmflush)
@@ -1037,7 +1037,7 @@ ksocknal_thread_fini(void)
 }
 
 int
-ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
+ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
 {
        static char ksocknal_slop_buffer[4096];
 
@@ -1120,7 +1120,7 @@ ksocknal_new_packet(ksock_conn_t *conn, int nob_to_skip)
 }
 
 static int
-ksocknal_process_receive(ksock_conn_t *conn)
+ksocknal_process_receive(struct ksock_conn *conn)
 {
        lnet_hdr_t *lhdr;
        lnet_process_id_t *id;
@@ -1328,8 +1328,8 @@ ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
              unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
              unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
-       ksock_conn_t *conn = private;
-       ksock_sched_t *sched = conn->ksnc_scheduler;
+       struct ksock_conn *conn = private;
+       struct ksock_sched *sched = conn->ksnc_scheduler;
 
        LASSERT(mlen <= rlen);
        LASSERT(niov <= LNET_MAX_IOV);
@@ -1382,7 +1382,7 @@ ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
 }
 
 static inline int
-ksocknal_sched_cansleep(ksock_sched_t *sched)
+ksocknal_sched_cansleep(struct ksock_sched *sched)
 {
        int rc;
 
@@ -1399,9 +1399,9 @@ ksocknal_sched_cansleep(ksock_sched_t *sched)
 int ksocknal_scheduler(void *arg)
 {
        struct ksock_sched_info *info;
-       ksock_sched_t *sched;
-       ksock_conn_t *conn;
-       ksock_tx_t *tx;
+       struct ksock_sched *sched;
+       struct ksock_conn *conn;
+       struct ksock_tx *tx;
        int rc;
        int nloops = 0;
        long id = (long)arg;
@@ -1426,7 +1426,7 @@ int ksocknal_scheduler(void *arg)
 
                if (!list_empty(&sched->kss_rx_conns)) {
                        conn = list_entry(sched->kss_rx_conns.next,
-                                         ksock_conn_t, ksnc_rx_list);
+                                         struct ksock_conn, ksnc_rx_list);
                        list_del(&conn->ksnc_rx_list);
 
                        LASSERT(conn->ksnc_rx_scheduled);
@@ -1481,7 +1481,7 @@ int ksocknal_scheduler(void *arg)
                        }
 
                        conn = list_entry(sched->kss_tx_conns.next,
-                                         ksock_conn_t, ksnc_tx_list);
+                                         struct ksock_conn, ksnc_tx_list);
                        list_del(&conn->ksnc_tx_list);
 
                        LASSERT(conn->ksnc_tx_scheduled);
@@ -1489,7 +1489,7 @@ int ksocknal_scheduler(void *arg)
                        LASSERT(!list_empty(&conn->ksnc_tx_queue));
 
                        tx = list_entry(conn->ksnc_tx_queue.next,
-                                       ksock_tx_t, tx_list);
+                                       struct ksock_tx, tx_list);
 
                        if (conn->ksnc_tx_carrier == tx)
                                ksocknal_next_tx_carrier(conn);
@@ -1575,9 +1575,9 @@ int ksocknal_scheduler(void *arg)
  * Add connection to kss_rx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_read_callback(ksock_conn_t *conn)
+void ksocknal_read_callback(struct ksock_conn *conn)
 {
-       ksock_sched_t *sched;
+       struct ksock_sched *sched;
 
        sched = conn->ksnc_scheduler;
 
@@ -1600,9 +1600,9 @@ void ksocknal_read_callback(ksock_conn_t *conn)
  * Add connection to kss_tx_conns of scheduler
  * and wakeup the scheduler.
  */
-void ksocknal_write_callback(ksock_conn_t *conn)
+void ksocknal_write_callback(struct ksock_conn *conn)
 {
-       ksock_sched_t *sched;
+       struct ksock_sched *sched;
 
        sched = conn->ksnc_scheduler;
 
@@ -1623,7 +1623,7 @@ void ksocknal_write_callback(ksock_conn_t *conn)
        spin_unlock_bh(&sched->kss_lock);
 }
 
-static ksock_proto_t *
+static struct ksock_proto *
 ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
 {
        __u32 version = 0;
@@ -1666,11 +1666,11 @@ ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
 }
 
 int
-ksocknal_send_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn,
                    lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
 {
        /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
-       ksock_net_t *net = (ksock_net_t *)ni->ni_data;
+       struct ksock_net *net = (struct ksock_net *)ni->ni_data;
 
        LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
 
@@ -1704,7 +1704,7 @@ ksocknal_invert_type(int type)
 }
 
 int
-ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
+ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
                    ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
                    __u64 *incarnation)
 {
@@ -1718,7 +1718,7 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
        int timeout;
        int proto_match;
        int rc;
-       ksock_proto_t *proto;
+       struct ksock_proto *proto;
        lnet_process_id_t recv_id;
 
        /* socket type set on active connections - not set on passive */
@@ -1847,10 +1847,10 @@ ksocknal_recv_hello(lnet_ni_t *ni, ksock_conn_t *conn,
 }
 
 static int
-ksocknal_connect(ksock_route_t *route)
+ksocknal_connect(struct ksock_route *route)
 {
        LIST_HEAD(zombies);
-       ksock_peer_t *peer = route->ksnr_peer;
+       struct ksock_peer *peer = route->ksnr_peer;
        int type;
        int wanted;
        struct socket *sock;
@@ -1989,7 +1989,7 @@ ksocknal_connect(ksock_route_t *route)
        if (!list_empty(&peer->ksnp_tx_queue) &&
            !peer->ksnp_accepting &&
            !ksocknal_find_connecting_route_locked(peer)) {
-               ksock_conn_t *conn;
+               struct ksock_conn *conn;
 
                /*
                 * ksnp_tx_queue is queued on a conn on successful
@@ -1997,7 +1997,7 @@ ksocknal_connect(ksock_route_t *route)
                 */
                if (!list_empty(&peer->ksnp_conns)) {
                        conn = list_entry(peer->ksnp_conns.next,
-                                         ksock_conn_t, ksnc_list);
+                                         struct ksock_conn, ksnc_list);
                        LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
                }
 
@@ -2131,10 +2131,10 @@ ksocknal_connd_check_stop(time64_t sec, long *timeout)
  * Go through connd_routes queue looking for a route that we can process
  * right now, @timeout_p can be updated if we need to come back later
  */
-static ksock_route_t *
+static struct ksock_route *
 ksocknal_connd_get_route_locked(signed long *timeout_p)
 {
-       ksock_route_t *route;
+       struct ksock_route *route;
        unsigned long now;
 
        now = cfs_time_current();
@@ -2158,7 +2158,7 @@ int
 ksocknal_connd(void *arg)
 {
        spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
-       ksock_connreq_t *cr;
+       struct ksock_connreq *cr;
        wait_queue_t wait;
        int nloops = 0;
        int cons_retry = 0;
@@ -2174,7 +2174,7 @@ ksocknal_connd(void *arg)
        ksocknal_data.ksnd_connd_running++;
 
        while (!ksocknal_data.ksnd_shuttingdown) {
-               ksock_route_t *route = NULL;
+               struct ksock_route *route = NULL;
                time64_t sec = ktime_get_real_seconds();
                long timeout = MAX_SCHEDULE_TIMEOUT;
                int dropped_lock = 0;
@@ -2192,8 +2192,8 @@ ksocknal_connd(void *arg)
 
                if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
                        /* Connection accepted by the listener */
-                       cr = list_entry(ksocknal_data.ksnd_connd_connreqs. \
-                                           next, ksock_connreq_t, ksncr_list);
+                       cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
+                                       struct ksock_connreq, ksncr_list);
 
                        list_del(&cr->ksncr_list);
                        spin_unlock_bh(connd_lock);
@@ -2267,17 +2267,17 @@ ksocknal_connd(void *arg)
        return 0;
 }
 
-static ksock_conn_t *
-ksocknal_find_timed_out_conn(ksock_peer_t *peer)
+static struct ksock_conn *
+ksocknal_find_timed_out_conn(struct ksock_peer *peer)
 {
        /* We're called with a shared lock on ksnd_global_lock */
-       ksock_conn_t *conn;
+       struct ksock_conn *conn;
        struct list_head *ctmp;
 
        list_for_each(ctmp, &peer->ksnp_conns) {
                int error;
 
-               conn = list_entry(ctmp, ksock_conn_t, ksnc_list);
+               conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
 
                /* Don't need the {get,put}connsock dance to deref ksnc_sock */
                LASSERT(!conn->ksnc_closing);
@@ -2351,10 +2351,10 @@ ksocknal_find_timed_out_conn(ksock_peer_t *peer)
 }
 
 static inline void
-ksocknal_flush_stale_txs(ksock_peer_t *peer)
+ksocknal_flush_stale_txs(struct ksock_peer *peer)
 {
-       ksock_tx_t *tx;
-       ksock_tx_t *tmp;
+       struct ksock_tx *tx;
+       struct ksock_tx *tmp;
        LIST_HEAD(stale_txs);
 
        write_lock_bh(&ksocknal_data.ksnd_global_lock);
@@ -2374,12 +2374,12 @@ ksocknal_flush_stale_txs(ksock_peer_t *peer)
 }
 
 static int
-ksocknal_send_keepalive_locked(ksock_peer_t *peer)
+ksocknal_send_keepalive_locked(struct ksock_peer *peer)
        __must_hold(&ksocknal_data.ksnd_global_lock)
 {
-       ksock_sched_t *sched;
-       ksock_conn_t *conn;
-       ksock_tx_t *tx;
+       struct ksock_sched *sched;
+       struct ksock_conn *conn;
+       struct ksock_tx *tx;
 
        if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
                return 0;
@@ -2440,9 +2440,9 @@ static void
 ksocknal_check_peer_timeouts(int idx)
 {
        struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
-       ksock_peer_t *peer;
-       ksock_conn_t *conn;
-       ksock_tx_t *tx;
+       struct ksock_peer *peer;
+       struct ksock_conn *conn;
+       struct ksock_tx *tx;
 
  again:
        /*
@@ -2483,8 +2483,8 @@ ksocknal_check_peer_timeouts(int idx)
                 * holding only shared lock
                 */
                if (!list_empty(&peer->ksnp_tx_queue)) {
-                       ksock_tx_t *tx = list_entry(peer->ksnp_tx_queue.next,
-                                                   ksock_tx_t, tx_list);
+                       struct ksock_tx *tx = list_entry(peer->ksnp_tx_queue.next,
+                                                   struct ksock_tx, tx_list);
 
                        if (cfs_time_aftereq(cfs_time_current(),
                                             tx->tx_deadline)) {
@@ -2518,7 +2518,7 @@ ksocknal_check_peer_timeouts(int idx)
                }
 
                tx = list_entry(peer->ksnp_zc_req_list.next,
-                               ksock_tx_t, tx_zc_list);
+                               struct ksock_tx, tx_zc_list);
                deadline = tx->tx_deadline;
                resid = tx->tx_resid;
                conn = tx->tx_conn;
@@ -2544,8 +2544,8 @@ int
 ksocknal_reaper(void *arg)
 {
        wait_queue_t wait;
-       ksock_conn_t *conn;
-       ksock_sched_t *sched;
+       struct ksock_conn *conn;
+       struct ksock_sched *sched;
        struct list_head enomem_conns;
        int nenomem_conns;
        long timeout;
@@ -2563,7 +2563,7 @@ ksocknal_reaper(void *arg)
        while (!ksocknal_data.ksnd_shuttingdown) {
                if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
                        conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
-                                         ksock_conn_t, ksnc_list);
+                                         struct ksock_conn, ksnc_list);
                        list_del(&conn->ksnc_list);
 
                        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2577,7 +2577,7 @@ ksocknal_reaper(void *arg)
 
                if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
                        conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
-                                         ksock_conn_t, ksnc_list);
+                                         struct ksock_conn, ksnc_list);
                        list_del(&conn->ksnc_list);
 
                        spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
@@ -2599,7 +2599,7 @@ ksocknal_reaper(void *arg)
                /* reschedule all the connections that stalled with ENOMEM... */
                nenomem_conns = 0;
                while (!list_empty(&enomem_conns)) {
-                       conn = list_entry(enomem_conns.next, ksock_conn_t,
+                       conn = list_entry(enomem_conns.next, struct ksock_conn,
                                          ksnc_tx_list);
                        list_del(&conn->ksnc_tx_list);
 
index 964b4e338fe0451cc7dc4816b0a7448d1d657555..6a17757fce1eb5fe08418aea9e2d393a937a4e50 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -37,7 +33,7 @@
 #include "socklnd.h"
 
 int
-ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
+ksocknal_lib_get_conn_addrs(struct ksock_conn *conn)
 {
        int rc = lnet_sock_getaddr(conn->ksnc_sock, 1, &conn->ksnc_ipaddr,
                                   &conn->ksnc_port);
@@ -60,7 +56,7 @@ ksocknal_lib_get_conn_addrs(ksock_conn_t *conn)
 }
 
 int
-ksocknal_lib_zc_capable(ksock_conn_t *conn)
+ksocknal_lib_zc_capable(struct ksock_conn *conn)
 {
        int caps = conn->ksnc_sock->sk->sk_route_caps;
 
@@ -75,7 +71,7 @@ ksocknal_lib_zc_capable(ksock_conn_t *conn)
 }
 
 int
-ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_lib_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
 {
        struct socket *sock = conn->ksnc_sock;
        int nob;
@@ -118,7 +114,7 @@ ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 int
-ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
+ksocknal_lib_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
 {
        struct socket *sock = conn->ksnc_sock;
        lnet_kiov_t *kiov = tx->tx_kiov;
@@ -187,7 +183,7 @@ ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)
 }
 
 void
-ksocknal_lib_eager_ack(ksock_conn_t *conn)
+ksocknal_lib_eager_ack(struct ksock_conn *conn)
 {
        int opt = 1;
        struct socket *sock = conn->ksnc_sock;
@@ -203,7 +199,7 @@ ksocknal_lib_eager_ack(ksock_conn_t *conn)
 }
 
 int
-ksocknal_lib_recv_iov(ksock_conn_t *conn)
+ksocknal_lib_recv_iov(struct ksock_conn *conn)
 {
 #if SOCKNAL_SINGLE_FRAG_RX
        struct kvec scratch;
@@ -309,7 +305,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
 }
 
 int
-ksocknal_lib_recv_kiov(ksock_conn_t *conn)
+ksocknal_lib_recv_kiov(struct ksock_conn *conn)
 {
 #if SOCKNAL_SINGLE_FRAG_RX || !SOCKNAL_RISK_KMAP_DEADLOCK
        struct kvec scratch;
@@ -393,7 +389,7 @@ ksocknal_lib_recv_kiov(ksock_conn_t *conn)
 }
 
 void
-ksocknal_lib_csum_tx(ksock_tx_t *tx)
+ksocknal_lib_csum_tx(struct ksock_tx *tx)
 {
        int i;
        __u32 csum;
@@ -432,7 +428,7 @@ ksocknal_lib_csum_tx(ksock_tx_t *tx)
 }
 
 int
-ksocknal_lib_get_conn_tunables(ksock_conn_t *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle)
 {
        struct socket *sock = conn->ksnc_sock;
        int len;
@@ -562,7 +558,7 @@ ksocknal_lib_setup_sock(struct socket *sock)
 }
 
 void
-ksocknal_lib_push_conn(ksock_conn_t *conn)
+ksocknal_lib_push_conn(struct ksock_conn *conn)
 {
        struct sock *sk;
        struct tcp_sock *tp;
@@ -599,7 +595,7 @@ ksocknal_lib_push_conn(ksock_conn_t *conn)
 static void
 ksocknal_data_ready(struct sock *sk)
 {
-       ksock_conn_t *conn;
+       struct ksock_conn *conn;
 
        /* interleave correctly with closing sockets... */
        LASSERT(!in_irq());
@@ -619,7 +615,7 @@ ksocknal_data_ready(struct sock *sk)
 static void
 ksocknal_write_space(struct sock *sk)
 {
-       ksock_conn_t *conn;
+       struct ksock_conn *conn;
        int wspace;
        int min_wpace;
 
@@ -663,14 +659,14 @@ ksocknal_write_space(struct sock *sk)
 }
 
 void
-ksocknal_lib_save_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_save_callback(struct socket *sock, struct ksock_conn *conn)
 {
        conn->ksnc_saved_data_ready = sock->sk->sk_data_ready;
        conn->ksnc_saved_write_space = sock->sk->sk_write_space;
 }
 
 void
-ksocknal_lib_set_callback(struct socket *sock,  ksock_conn_t *conn)
+ksocknal_lib_set_callback(struct socket *sock,  struct ksock_conn *conn)
 {
        sock->sk->sk_user_data = conn;
        sock->sk->sk_data_ready = ksocknal_data_ready;
@@ -678,7 +674,7 @@ ksocknal_lib_set_callback(struct socket *sock,  ksock_conn_t *conn)
 }
 
 void
-ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
+ksocknal_lib_reset_callback(struct socket *sock, struct ksock_conn *conn)
 {
        /*
         * Remove conn's network callbacks.
@@ -697,10 +693,10 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
 }
 
 int
-ksocknal_lib_memory_pressure(ksock_conn_t *conn)
+ksocknal_lib_memory_pressure(struct ksock_conn *conn)
 {
        int rc = 0;
-       ksock_sched_t *sched;
+       struct ksock_sched *sched;
 
        sched = conn->ksnc_scheduler;
        spin_lock_bh(&sched->kss_lock);
index 6329cbe66573160e98f1bf9ab6faf260ca06a281..fc7eec83ac078dc1714006a362d7211786bd8f4e 100644 (file)
@@ -139,7 +139,7 @@ module_param(protocol, int, 0644);
 MODULE_PARM_DESC(protocol, "protocol version");
 #endif
 
-ksock_tunables_t ksocknal_tunables;
+struct ksock_tunables ksocknal_tunables;
 
 int ksocknal_tunables_init(void)
 {
index 32cc31e4cc29efa5ae833bb369700efb2533f7c0..82e174f6d9fef26d97343238753eaec94b51bb96 100644 (file)
@@ -38,8 +38,8 @@
  *   pro_match_tx()       : Called holding glock
  */
 
-static ksock_tx_t *
-ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
+static struct ksock_tx *
+ksocknal_queue_tx_msg_v1(struct ksock_conn *conn, struct ksock_tx *tx_msg)
 {
        /* V1.x, just enqueue it */
        list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue);
@@ -47,9 +47,9 @@ ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg)
 }
 
 void
-ksocknal_next_tx_carrier(ksock_conn_t *conn)
+ksocknal_next_tx_carrier(struct ksock_conn *conn)
 {
-       ksock_tx_t *tx = conn->ksnc_tx_carrier;
+       struct ksock_tx *tx = conn->ksnc_tx_carrier;
 
        /* Called holding BH lock: conn->ksnc_scheduler->kss_lock */
        LASSERT(!list_empty(&conn->ksnc_tx_queue));
@@ -66,10 +66,10 @@ ksocknal_next_tx_carrier(ksock_conn_t *conn)
 }
 
 static int
-ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
-                          ksock_tx_t *tx_ack, __u64 cookie)
+ksocknal_queue_tx_zcack_v2(struct ksock_conn *conn,
+                          struct ksock_tx *tx_ack, __u64 cookie)
 {
-       ksock_tx_t *tx = conn->ksnc_tx_carrier;
+       struct ksock_tx *tx = conn->ksnc_tx_carrier;
 
        LASSERT(!tx_ack ||
                tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP);
@@ -112,10 +112,10 @@ ksocknal_queue_tx_zcack_v2(ksock_conn_t *conn,
        return 1;
 }
 
-static ksock_tx_t *
-ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
+static struct ksock_tx *
+ksocknal_queue_tx_msg_v2(struct ksock_conn *conn, struct ksock_tx *tx_msg)
 {
-       ksock_tx_t *tx  = conn->ksnc_tx_carrier;
+       struct ksock_tx *tx  = conn->ksnc_tx_carrier;
 
        /*
         * Enqueue tx_msg:
@@ -149,10 +149,10 @@ ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg)
 }
 
 static int
-ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
-                          ksock_tx_t *tx_ack, __u64 cookie)
+ksocknal_queue_tx_zcack_v3(struct ksock_conn *conn,
+                          struct ksock_tx *tx_ack, __u64 cookie)
 {
-       ksock_tx_t *tx;
+       struct ksock_tx *tx;
 
        if (conn->ksnc_type != SOCKLND_CONN_ACK)
                return ksocknal_queue_tx_zcack_v2(conn, tx_ack, cookie);
@@ -267,7 +267,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
 }
 
 static int
-ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
+ksocknal_match_tx(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
 {
        int nob;
 
@@ -311,7 +311,7 @@ ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
 }
 
 static int
-ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
+ksocknal_match_tx_v3(struct ksock_conn *conn, struct ksock_tx *tx, int nonblk)
 {
        int nob;
 
@@ -355,18 +355,18 @@ ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)
 
 /* (Sink) handle incoming ZC request from sender */
 static int
-ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
+ksocknal_handle_zcreq(struct ksock_conn *c, __u64 cookie, int remote)
 {
-       ksock_peer_t *peer = c->ksnc_peer;
-       ksock_conn_t *conn;
-       ksock_tx_t *tx;
+       struct ksock_peer *peer = c->ksnc_peer;
+       struct ksock_conn *conn;
+       struct ksock_tx *tx;
        int rc;
 
        read_lock(&ksocknal_data.ksnd_global_lock);
 
        conn = ksocknal_find_conn_locked(peer, NULL, !!remote);
        if (conn) {
-               ksock_sched_t *sched = conn->ksnc_scheduler;
+               struct ksock_sched *sched = conn->ksnc_scheduler;
 
                LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
 
@@ -399,12 +399,12 @@ ksocknal_handle_zcreq(ksock_conn_t *c, __u64 cookie, int remote)
 
 /* (Sender) handle ZC_ACK from sink */
 static int
-ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
+ksocknal_handle_zcack(struct ksock_conn *conn, __u64 cookie1, __u64 cookie2)
 {
-       ksock_peer_t *peer = conn->ksnc_peer;
-       ksock_tx_t *tx;
-       ksock_tx_t *temp;
-       ksock_tx_t *tmp;
+       struct ksock_peer *peer = conn->ksnc_peer;
+       struct ksock_tx *tx;
+       struct ksock_tx *temp;
+       struct ksock_tx *tmp;
        LIST_HEAD(zlist);
        int count;
 
@@ -446,7 +446,7 @@ ksocknal_handle_zcack(ksock_conn_t *conn, __u64 cookie1, __u64 cookie2)
 }
 
 static int
-ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
+ksocknal_send_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello)
 {
        struct socket *sock = conn->ksnc_sock;
        lnet_hdr_t *hdr;
@@ -503,7 +503,7 @@ ksocknal_send_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello)
        if (!hello->kshm_nips)
                goto out;
 
-       for (i = 0; i < (int) hello->kshm_nips; i++)
+       for (i = 0; i < (int)hello->kshm_nips; i++)
                hello->kshm_ips[i] = __cpu_to_le32(hello->kshm_ips[i]);
 
        rc = lnet_sock_write(sock, hello->kshm_ips,
@@ -521,7 +521,7 @@ out:
 }
 
 static int
-ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
+ksocknal_send_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello)
 {
        struct socket *sock = conn->ksnc_sock;
        int rc;
@@ -563,7 +563,7 @@ ksocknal_send_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello)
 }
 
 static int
-ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
+ksocknal_recv_hello_v1(struct ksock_conn *conn, ksock_hello_msg_t *hello,
                       int timeout)
 {
        struct socket *sock = conn->ksnc_sock;
@@ -622,7 +622,7 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,
                goto out;
        }
 
-       for (i = 0; i < (int) hello->kshm_nips; i++) {
+       for (i = 0; i < (int)hello->kshm_nips; i++) {
                hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
 
                if (!hello->kshm_ips[i]) {
@@ -639,7 +639,7 @@ out:
 }
 
 static int
-ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout)
+ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout)
 {
        struct socket *sock = conn->ksnc_sock;
        int rc;
@@ -690,7 +690,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
                return rc;
        }
 
-       for (i = 0; i < (int) hello->kshm_nips; i++) {
+       for (i = 0; i < (int)hello->kshm_nips; i++) {
                if (conn->ksnc_flip)
                        __swab32s(&hello->kshm_ips[i]);
 
@@ -705,7 +705,7 @@ ksocknal_recv_hello_v2(ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeout
 }
 
 static void
-ksocknal_pack_msg_v1(ksock_tx_t *tx)
+ksocknal_pack_msg_v1(struct ksock_tx *tx)
 {
        /* V1.x has no KSOCK_MSG_NOOP */
        LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
@@ -719,7 +719,7 @@ ksocknal_pack_msg_v1(ksock_tx_t *tx)
 }
 
 static void
-ksocknal_pack_msg_v2(ksock_tx_t *tx)
+ksocknal_pack_msg_v2(struct ksock_tx *tx)
 {
        tx->tx_iov[0].iov_base = &tx->tx_msg;
 
@@ -755,7 +755,7 @@ ksocknal_unpack_msg_v2(ksock_msg_t *msg)
        return;  /* Do nothing */
 }
 
-ksock_proto_t  ksocknal_protocol_v1x = {
+struct ksock_proto ksocknal_protocol_v1x = {
        .pro_version        = KSOCK_PROTO_V1,
        .pro_send_hello     = ksocknal_send_hello_v1,
        .pro_recv_hello     = ksocknal_recv_hello_v1,
@@ -768,7 +768,7 @@ ksock_proto_t  ksocknal_protocol_v1x = {
        .pro_match_tx       = ksocknal_match_tx
 };
 
-ksock_proto_t  ksocknal_protocol_v2x = {
+struct ksock_proto ksocknal_protocol_v2x = {
        .pro_version        = KSOCK_PROTO_V2,
        .pro_send_hello     = ksocknal_send_hello_v2,
        .pro_recv_hello     = ksocknal_recv_hello_v2,
@@ -781,7 +781,7 @@ ksock_proto_t  ksocknal_protocol_v2x = {
        .pro_match_tx       = ksocknal_match_tx
 };
 
-ksock_proto_t  ksocknal_protocol_v3x = {
+struct ksock_proto ksocknal_protocol_v3x = {
        .pro_version        = KSOCK_PROTO_V3,
        .pro_send_hello     = ksocknal_send_hello_v2,
        .pro_recv_hello     = ksocknal_recv_hello_v2,
index 8c260c3d5da4c408ac91b893a752b2b2e39b8843..42b15a76918398831325518799dd758011aaf465 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -366,12 +362,12 @@ void libcfs_debug_dumplog(void)
         * get to schedule()
         */
        init_waitqueue_entry(&wait, current);
-       set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&debug_ctlwq, &wait);
 
        dumper = kthread_run(libcfs_debug_dumplog_thread,
                             (void *)(long)current_pid(),
                             "libcfs_debug_dumper");
+       set_current_state(TASK_INTERRUPTIBLE);
        if (IS_ERR(dumper))
                pr_err("LustreError: cannot start log dump thread: %ld\n",
                       PTR_ERR(dumper));
index 086e690bd6f2b45b8c35964556170ea6b356ba10..9288ee08d1f724874dcc18f6f5a869c45f04edab 100644 (file)
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see http://www.gnu.org/licenses
  *
- * Please contact Oracle Corporation, Inc., 500 Oracle Parkway, Redwood Shores,
- * CA 94065 USA or visit www.oracle.com if you need additional information or
- * have any questions.
- *
  * GPL HEADER END
  */
 /*
index cc45ed82b2bef963dcd8b81404f4a801a8887048..23283b6e09ab1903e97341ac20313605c9215f39 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 50ac1536db4b88b379eb9ab26ecde8aaf61c7c8d..fc697cdfcdaf678ab55704acf263375486da840f 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 84f9b7b4758115fcbb5d914c34a929efbc25bfe6..5c0116ade90949caa46e2c077cfec870e39b7b3e 100644 (file)
@@ -99,6 +99,7 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
                                         (*type)->cht_size);
 
        if (err != 0) {
+               ahash_request_free(*req);
                crypto_free_ahash(tfm);
                return err;
        }
index 13d31e8a931da33cfd58b6840347e7ec7fcb2736..3e22cad18a8b7d06a8674e429f333e1a6433a5e6 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 638e4b33d3a94ee6ce7e508ded3f973daba19f27..435b784c52f8d2a8b37156df220b2ae98383dc9b 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 86f32ffc5d046b8f6dba000cb8fd7587b8afa4c0..a6a76a681ea9af0e03709fcacbe50dbbc369e636 100644 (file)
@@ -11,7 +11,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  */
 /*
index d89f71ee45b2920adbd52e2ab62a3049ce77f4c2..38308f8b6aae907fe0eace9075f171d8b66eff31 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index bbe19a684c81b687e7a88748982979b7ea13cd39..291d286eab481de35bd66f38f01a6a70c2ad50cf 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 91c2ae8f9d677ec1251eab26f610a3da6082a379..8b551d2708babfeed9640f0bdeee5d9a2a2c94f1 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index f2d041118cf7dfc3bd447247185473744287b5b7..86b4d25cad46b0a9a92a48f35603b78b2daff4f4 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index c75ae9a68e760e2096f435617794e6ff4f6c2615..a9bdb284fd15777149da26e11fa7a24d384aaa1d 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 7739b9469c5a87bbc0516b7d2fad14a78ed92fdc..1c7efdfaffcf21ebc81304f6488414d4690cdedf 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index ac84e7f4c859e917ca186e1aac7ad2bf2dec48e7..d878676bc375e369794fc0896f3aaae9c8fe1acb 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 92236ae59e49c62c242a82bf02390e0fbad31271..e98c818a14fb7f6f14a303164fbb539f8c55f802 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 1452bb3ad9eb03f8f8e77e7ce6ca90868e300387..8c50c99d82d5df054bd0df95d0b8c7fa15ec0fb8 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index fe0dbe7468e7d38cce0d31a4c8e9b5a5a8842c63..346db892f27540b48b74d45e3c41aac91d184465 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -1677,7 +1673,7 @@ lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
        if (!ni || !config)
                return;
 
-       net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
+       net_config = (struct lnet_ioctl_net_config *)config->cfg_bulk;
        if (!net_config)
                return;
 
index 480cc9c6caabdadadc61706f34ed8a4ea17e2abe..a72afdf68bb274ebebf790e0161eef7f3fcffff0 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index adbcadbab1be1562d12c5f9ef206f08be355b1a5..d05c6cc797f676ba42ae397e4bfa99d7d4ab57f2 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 75d31217bf92423b04d47e622eb47e47d87f225b..1834bf7a27effbe2080797ff81ade09fd7581760 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index e671aed373df7124dc40855edf12a24d5fde3ea3..b430046dc294f04572964b00e8146c4e9c6b4b3e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index c5d5bedb3128164c44e83dad478f78bd59a66f34..e6d3b801d87d4d1116be7e287b62f2f633810933 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index f879d7f2870815d1bb839db5b7b80d71a352de2b..910e106e221d07b2686e5ca98e9b68fdf36bf9bb 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 468eda611bf8ad8c1c80d2a6861df312f7c76cb7..08402712a452cc697513fc2676f91659834092a2 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 246b5c141d01195e42fc796af680478b4c5859a8..4ffbd3e441e8b774fc441101cab79e6a26a46c2e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -200,7 +196,7 @@ static int __init lnet_init(void)
                 * Have to schedule a separate thread to avoid deadlocking
                 * in modload
                 */
-               (void) kthread_run(lnet_configure, NULL, "lnet_initd");
+               (void)kthread_run(lnet_configure, NULL, "lnet_initd");
        }
 
        return 0;
index 7d76f28d3a7a17b06d3ac2c7d7f2487a654d6d54..e4aceb71c4ecb838f88a672aa0c2c316b2c9fee1 100644 (file)
@@ -760,9 +760,7 @@ lnet_delay_rule_add(struct lnet_fault_attr *attr)
                wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
        }
 
-       init_timer(&rule->dl_timer);
-       rule->dl_timer.function = delay_timer_cb;
-       rule->dl_timer.data = (unsigned long)rule;
+       setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule);
 
        spin_lock_init(&rule->dl_lock);
        INIT_LIST_HEAD(&rule->dl_msg_list);
index ebf468fbc64f409e117bedfadc91622db46360c2..a6d7a6159b8f6b15af858fe117301b1e4552fbd3 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index b026feebc03a8537021fa737e7db14661824353f..e8061916c241495c0833ea30b54b1498856bfaf4 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index b01dc424c514e0d5d48ca0f5e8dfafaf94dd7899..063543233035c599e395407d7b964b02e952a3b3 100644 (file)
@@ -18,6 +18,7 @@
  */
 
 #define DEBUG_SUBSYSTEM S_LNET
+#include <linux/completion.h>
 #include "../../include/linux/lnet/lib-lnet.h"
 
 #define LNET_NRB_TINY_MIN      512     /* min value for each CPT */
@@ -1065,7 +1066,7 @@ lnet_router_checker_start(void)
                return -EINVAL;
        }
 
-       sema_init(&the_lnet.ln_rc_signal, 0);
+       init_completion(&the_lnet.ln_rc_signal);
 
        rc = LNetEQAlloc(0, lnet_router_checker_event, &the_lnet.ln_rc_eqh);
        if (rc) {
@@ -1079,7 +1080,7 @@ lnet_router_checker_start(void)
                rc = PTR_ERR(task);
                CERROR("Can't start router checker thread: %d\n", rc);
                /* block until event callback signals exit */
-               down(&the_lnet.ln_rc_signal);
+               wait_for_completion(&the_lnet.ln_rc_signal);
                rc = LNetEQFree(the_lnet.ln_rc_eqh);
                LASSERT(!rc);
                the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
@@ -1112,7 +1113,7 @@ lnet_router_checker_stop(void)
        wake_up(&the_lnet.ln_rc_waitq);
 
        /* block until event callback signals exit */
-       down(&the_lnet.ln_rc_signal);
+       wait_for_completion(&the_lnet.ln_rc_signal);
        LASSERT(the_lnet.ln_rc_state == LNET_RC_STATE_SHUTDOWN);
 
        rc = LNetEQFree(the_lnet.ln_rc_eqh);
@@ -1295,7 +1296,7 @@ rescan:
        lnet_prune_rc_data(1); /* wait for UNLINK */
 
        the_lnet.ln_rc_state = LNET_RC_STATE_SHUTDOWN;
-       up(&the_lnet.ln_rc_signal);
+       complete(&the_lnet.ln_rc_signal);
        /* The unlink event callback will signal final completion */
        return 0;
 }
index a63d86c4c10d6a185148c7d74a4eb450e67c512b..13d0454e7fcb3e82693195ab31bb353e45eb4004 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 408c614b6ca33af0f26865b0172a0dd5f3998a23..b786f8b4a73dc5b5344e3bfc9aa5296443199def 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 6f687581117deccec1d2635ac7bbc48f975147a8..1be3cad727aebd88b20beacb4f4f61326b8835e8 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 90c3385a355c11db57cf3bd71737330ae6e1eb19..7ec6fc96959e4277b6f28e88472bc77e4a010ce1 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index a03e52d29d3f4b87b2cb46896987fef6fe00c201..4c33621f06da0851a9d01453ef4c9c015267e16e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index becd22e41da9d1569b29400a052d7ded29c93ef6..78b1477326154fed5b412e970714e549bc6d5296 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 30e4f71f14c2040e41e9b3f7601d81310557ed15..c2f121f44d3387e76e9ca68d9d9e7af9f68d09b3 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index cc046b1d4d0abb645135a42c35123f794d0efb1f..71485f992297c74dd31b446bb84155dcbb2c8639 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index ad26fe9dd4af6bd19aa16b2869cd24fdd341cf5a..9331ca4e3606261268c10448a2876bb2c35e6039 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 3c45a7cfae18498371b727e0776a9f6f01e67bfe..3b26d6eb42409d7d59678339cb6a5e4d7fc2c513 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index c9b904cade1648ca7edd350d665ceb1ed284dcb8..4ab2ee26400439c71554e4bd3d8fd1726fb16f14 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 4eac1c9e639f8fb4e6d5feeacfe2e6d3fdaba04e..d033ac03d953c46ec19a4f6befcc3192551bcb73 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- * copy of GPLv2].
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index b6c4aae007af3e7294ca71bc2d458465f949c816..dcd22580b1f0cd98782110f1b6da0636cb61db15 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index f1fbebd8a67c7a297eb19e6d8e3dc92c05ec37a5..441d6d6b4f8eeee5708cd5811d3dda42ad91ffa2 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 8ac7cd4d6fdb0cc4168bc12ee61391cc9a36413d..9f5d75f166e7c9eda45a4a795b803c133e38e4b0 100644 (file)
@@ -54,9 +54,3 @@ config LUSTRE_TRANSLATE_ERRNOS
        bool
        depends on LUSTRE_FS && !X86
        default y
-
-config LUSTRE_LLITE_LLOOP
-       tristate "Lustre virtual block device"
-       depends on LUSTRE_FS && BLOCK
-       depends on !PPC_64K_PAGES && !ARM64_64K_PAGES && !MICROBLAZE_64K_PAGES && !PAGE_SIZE_64KB && !IA64_PAGE_SIZE_64KB && !PARISC_PAGE_SIZE_64KB
-       default m
index b79a813977cfd57b1d545c1c2cbc26d8025c31bf..5c53773ecc5a68d492f4c8284342489988b24bf6 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index dd65159ebb3802b8c0334d649af792fd01f1aa5e..99ae7eb6720e16c535a491cec765db2b32bab281 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 3a4df626462f724cf8b81e32dc412d4a97b64421..454744d25956f9f767d4664960006ef3a8a8f19e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -98,8 +94,10 @@ static int seq_client_rpc(struct lu_client_seq *seq,
                 * request here, otherwise if MDT0 is failed(umounted),
                 * it can not release the export of MDT0
                 */
-               if (seq->lcs_type == LUSTRE_SEQ_DATA)
-                       req->rq_no_delay = req->rq_no_resend = 1;
+               if (seq->lcs_type == LUSTRE_SEQ_DATA) {
+                       req->rq_no_delay = 1;
+                       req->rq_no_resend = 1;
+               }
                debug_mask = D_CONSOLE;
        } else {
                if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
index 1f0e78686278dd69f6b6c9bc67ccbe62c7ae34ee..81b7ca9ea2fdc1dfddb6b0d4d5be44f05f92cfb9 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 5a04e99d9249a4008fce9083a59076b2f1b0495b..0100a935f4ff292798f6c62f42d3213d59dca1cc 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 75d6a48637a9971fe08520ae1d23c067cacbe500..f0efe5b9fbec4afb32ab40e1fc974c73f5dbfd35 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 304c0ec268c9fc117b3dff7e1e7f0f84c0113bcb..e59d626a15481c14a4304badf0bf86830c2538c7 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index ca898befeba621a26e7542288e56442f0fa2bbc1..61ac420798afbe49c0f04b2da37bc4b9bd494e9f 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index d4c33dd110abb4e5eaa988261d0e34a8414db23c..3cd4a2577d90d410801fe78c04bdda17bc59ec99 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -2326,7 +2322,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
  */
 struct cl_client_cache {
        /**
-        * # of users (OSCs)
+        * # of client cache refcount
+        * # of users (OSCs) + 2 (held by llite and lov)
         */
        atomic_t                ccc_users;
        /**
@@ -2361,6 +2358,13 @@ struct cl_client_cache {
 
 };
 
+/**
+ * cl_cache functions
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max);
+void cl_cache_incref(struct cl_client_cache *cache);
+void cl_cache_decref(struct cl_client_cache *cache);
+
 /** @} cl_page */
 
 /** \defgroup cl_lock cl_lock
index f6df3f33e770d29e16575febcfbc6e2c2b2ae479..4a15228b5570ca72f6416df82912c63f70e7ce3f 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 79d8f93075d12d54143509fb538765b5194e94e3..1eb64ec4bed4a92f8e5dfba5fbc90f0a4e61407f 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 3420cfd1278d740c1bf28261cc52ec68785d1b0f..d18e8a76bb2504b43b12d43da84a6c07845a542b 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index c6c7f54637fb2a35b97c1fe3b7d0e9cdb73517b9..5842cb18b49ef4d6196ee2fa49b8872b6cc08d38 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 9cc2849f3f85567c6adac8a966a4d0edc3627ada..e967950e85369d3749a7a9fff9cb504e5c48b3bd 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 4146c9c3999f5bb087870454f1761d4d234b0e3f..d68e60e7fef73250993ead48bec64201e38b4075 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 2816512185af4afd66a8a8d5734665ff30e82584..6e25c1bb6aa316aa7e783ed9c1e9fc45aeec48b5 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -783,7 +779,7 @@ do {                                                                      \
        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                \
                LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);                \
                lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
-               CDEBUG(mask, format, ## __VA_ARGS__);               \
+               CDEBUG(mask, format "\n", ## __VA_ARGS__);                  \
        }                                                                \
 } while (0)
 
index 07d45de69dd99d2aa46a831f8c36ade7bd102814..c2340d643e848f70b97024435a0e6243d1376ac2 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 9c53c1792dc836da42b37b0e41c6bcde07ca2cff..051864c23b5b2e2f5124c02c547928aedb4b64bb 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -386,7 +382,7 @@ static inline __u64 fid_ver_oid(const struct lu_fid *fid)
  * used for other purposes and not risk collisions with existing inodes.
  *
  * Different FID Format
- * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
+ * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
  */
 enum fid_seq {
        FID_SEQ_OST_MDT0        = 0,
@@ -704,7 +700,7 @@ static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
  * be passed through unchanged.  Only legacy OST objects in "group 0"
  * will be mapped into the IDIF namespace so that they can fit into the
  * struct lu_fid fields without loss.  For reference see:
- * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
+ * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
  */
 static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
                               __u32 ost_idx)
@@ -1241,8 +1237,16 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
                                                  */
 #define OBD_CONNECT_ATTRFID           0x4000ULL /*Server can GetAttr By Fid*/
 #define OBD_CONNECT_NODEVOH           0x8000ULL /*No open hndl on specl nodes*/
-#define OBD_CONNECT_RMT_CLIENT       0x10000ULL /*Remote client */
-#define OBD_CONNECT_RMT_CLIENT_FORCE  0x20000ULL /*Remote client by force */
+#define OBD_CONNECT_RMT_CLIENT       0x10000ULL /* Remote client, never used
+                                                 * in production. Removed in
+                                                 * 2.9. Keep this flag to
+                                                 * avoid reuse.
+                                                 */
+#define OBD_CONNECT_RMT_CLIENT_FORCE  0x20000ULL /* Remote client by force,
+                                                 * never used in production.
+                                                 * Removed in 2.9. Keep this
+                                                 * flag to avoid reuse
+                                                 */
 #define OBD_CONNECT_BRW_SIZE         0x40000ULL /*Max bytes per rpc */
 #define OBD_CONNECT_QUOTA64          0x80000ULL /*Not used since 2.4 */
 #define OBD_CONNECT_MDS_CAPA        0x100000ULL /*MDS capability */
@@ -1703,7 +1707,7 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
 #define OBD_MD_FLXATTRLS     (0x0000002000000000ULL) /* xattr list */
 #define OBD_MD_FLXATTRRM     (0x0000004000000000ULL) /* xattr remove */
 #define OBD_MD_FLACL        (0x0000008000000000ULL) /* ACL */
-#define OBD_MD_FLRMTPERM     (0x0000010000000000ULL) /* remote permission */
+/*     OBD_MD_FLRMTPERM     (0x0000010000000000ULL) remote perm, obsolete */
 #define OBD_MD_FLMDSCAPA     (0x0000020000000000ULL) /* MDS capability */
 #define OBD_MD_FLOSSCAPA     (0x0000040000000000ULL) /* OSS capability */
 #define OBD_MD_FLCKSPLIT     (0x0000080000000000ULL) /* Check split on server */
@@ -1715,10 +1719,10 @@ lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
                                                      */
 #define OBD_MD_FLOBJCOUNT    (0x0000400000000000ULL) /* for multiple destroy */
 
-#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
-#define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
-#define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
-#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
+/*     OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) lfs lsetfacl, obsolete */
+/*     OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) lfs lgetfacl, obsolete */
+/*     OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) lfs rsetfacl, obsolete */
+/*     OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */
 
 #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
 #define OBD_MD_FLRELEASED    (0x0020000000000000ULL) /* file released */
@@ -2159,26 +2163,8 @@ enum {
        CFS_SETUID_PERM = 0x01,
        CFS_SETGID_PERM = 0x02,
        CFS_SETGRP_PERM = 0x04,
-       CFS_RMTACL_PERM = 0x08,
-       CFS_RMTOWN_PERM = 0x10
 };
 
-/* inode access permission for remote user, the inode info are omitted,
- * for client knows them.
- */
-struct mdt_remote_perm {
-       __u32      rp_uid;
-       __u32      rp_gid;
-       __u32      rp_fsuid;
-       __u32      rp_fsuid_h;
-       __u32      rp_fsgid;
-       __u32      rp_fsgid_h;
-       __u32      rp_access_perm; /* MAY_READ/WRITE/EXEC */
-       __u32      rp_padding;
-};
-
-void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
-
 struct mdt_rec_setattr {
        __u32      sa_opcode;
        __u32      sa_cap;
index 59ba48ac31a74ea796c00ee0b7683a1fbb4045a0..ef6f38ff359ea15208cca36131537aa7f4a21f59 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -215,7 +211,7 @@ struct ost_id {
 #define IOC_OBD_STATFS           _IOWR('f', 164, struct obd_statfs *)
 #define IOC_LOV_GETINFO                 _IOWR('f', 165, struct lov_user_mds_data *)
 #define LL_IOC_FLUSHCTX                 _IOW('f', 166, long)
-#define LL_IOC_RMTACL             _IOW('f', 167, long)
+/* LL_IOC_RMTACL                       167 obsolete */
 #define LL_IOC_GETOBDCOUNT           _IOR('f', 168, long)
 #define LL_IOC_LLOOP_ATTACH         _IOWR('f', 169, long)
 #define LL_IOC_LLOOP_DETACH         _IOWR('f', 170, long)
@@ -542,19 +538,6 @@ struct identity_downcall_data {
        __u32                       idd_groups[0];
 };
 
-/* for non-mapped uid/gid */
-#define NOBODY_UID      99
-#define NOBODY_GID      99
-
-#define INVALID_ID      (-1)
-
-enum {
-       RMT_LSETFACL    = 1,
-       RMT_LGETFACL    = 2,
-       RMT_RSETFACL    = 3,
-       RMT_RGETFACL    = 4
-};
-
 /* lustre volatile file support
  * file name header: .^L^S^T^R:volatile"
  */
index aa4cfa7b749da0f3baea0c89dd1a249c0acfb566..fecabe139b1f1743b104cf2f0ea6b20336ee5672 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index e229e91f7f562385238f259d7b2190c9dc469d03..95a0be13c0fb0cf3cb1c95bc81b1185da8a4543c 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 8a089413c92e94f3b8155efee6f02b6a8a3bc0fc..93c1bdaf71a457e615dfa41ceac8aa0f407983d1 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index b36821ffb252961975767c66c099b6f07e99b4ec..8886458748c1316f27c5825da73c818605cdce0d 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 9cade144faca5635ce31a356d0e3a9c6da9f5ee4..60051a5cfe208750130e7112b937bbbad9c1ff78 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -1077,7 +1073,7 @@ void ldlm_lock2handle(const struct ldlm_lock *lock,
 struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *, __u64 flags);
 void ldlm_cancel_callback(struct ldlm_lock *);
 int ldlm_lock_remove_from_lru(struct ldlm_lock *);
-int ldlm_lock_set_data(struct lustre_handle *, void *);
+int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data);
 
 /**
  * Obtain a lock reference by its handle.
@@ -1166,10 +1162,10 @@ do {                                        \
 struct ldlm_lock *ldlm_lock_get(struct ldlm_lock *lock);
 void ldlm_lock_put(struct ldlm_lock *lock);
 void ldlm_lock2desc(struct ldlm_lock *lock, struct ldlm_lock_desc *desc);
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode);
-int  ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode);
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode);
+int  ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode);
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode);
 void ldlm_lock_fail_match_locked(struct ldlm_lock *lock);
 void ldlm_lock_allow_match(struct ldlm_lock *lock);
 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock);
@@ -1178,10 +1174,10 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
                               enum ldlm_type type, ldlm_policy_data_t *,
                               enum ldlm_mode mode, struct lustre_handle *,
                               int unref);
-enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
                                           __u64 *bits);
 void ldlm_lock_cancel(struct ldlm_lock *lock);
-void ldlm_lock_dump_handle(int level, struct lustre_handle *);
+void ldlm_lock_dump_handle(int level, const struct lustre_handle *);
 void ldlm_unlink_lock_skiplist(struct ldlm_lock *req);
 
 /* resource.c */
@@ -1255,9 +1251,9 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          enum ldlm_type type, __u8 with_policy,
                          enum ldlm_mode mode,
                          __u64 *flags, void *lvb, __u32 lvb_len,
-                         struct lustre_handle *lockh, int rc);
+                         const struct lustre_handle *lockh, int rc);
 int ldlm_cli_update_pool(struct ptlrpc_request *req);
-int ldlm_cli_cancel(struct lustre_handle *lockh,
+int ldlm_cli_cancel(const struct lustre_handle *lockh,
                    enum ldlm_cancel_flags cancel_flags);
 int ldlm_cli_cancel_unused(struct ldlm_namespace *, const struct ldlm_res_id *,
                           enum ldlm_cancel_flags flags, void *opaque);
index 0b66593a95263463ff093bda3b78cddc854b5a1c..d1039e1ff70df7d7426363fdfd9883144e288435 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -70,17 +66,6 @@ typedef struct {
 #define CFS_ACL_XATTR_COUNT(size, prefix) \
        (((size) - sizeof(prefix ## _header)) / sizeof(prefix ## _entry))
 
-extern ext_acl_xattr_header *
-lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size);
-extern int
-lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
-                             posix_acl_xattr_header **out);
-extern void
-lustre_ext_acl_xattr_free(ext_acl_xattr_header *header);
-extern ext_acl_xattr_header *
-lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
-                          ext_acl_xattr_header *ext_header);
-
 #endif /* CONFIG_FS_POSIX_ACL */
 
 /** @} eacl */
index 3014d27e6dc22a67c1bde69c375d8a2771a4cbe6..6e7cc4689fb8dbc8cacb2c0ae8a8cea6cf182a8a 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -180,19 +176,6 @@ static inline int exp_connect_lru_resize(struct obd_export *exp)
        return !!(exp_connect_flags(exp) & OBD_CONNECT_LRU_RESIZE);
 }
 
-static inline int exp_connect_rmtclient(struct obd_export *exp)
-{
-       return !!(exp_connect_flags(exp) & OBD_CONNECT_RMT_CLIENT);
-}
-
-static inline int client_is_remote(struct obd_export *exp)
-{
-       struct obd_import *imp = class_exp2cliimp(exp);
-
-       return !!(imp->imp_connect_data.ocd_connect_flags &
-                 OBD_CONNECT_RMT_CLIENT);
-}
-
 static inline int exp_connect_vbr(struct obd_export *exp)
 {
        return !!(exp_connect_flags(exp) & OBD_CONNECT_VBR);
index 12e8b585c2b41e737c099e6b79f0b9e3e278468f..743671a547efe364391b3815b0cac7da219e3ff1 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -45,7 +41,7 @@
  *
  * @{
  *
- * http://wiki.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
+ * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
  * describes the FID namespace and interoperability requirements for FIDs.
  * The important parts of that document are included here for reference.
  *
index 4cf2b0e61672dae6823081535e3795ff5eec9a35..932410d3e3ccf19a6bcb904265ee695ec62786fe 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 5488a698dabd08fcbd92390c6f5408469d7fee17..cde7ed702c86507399c47ea971be79d3cb12fb8a 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 27f169d2ed34753bc30f98cdb17e62783b4b5aa0..1a63a6b9e1161e472342777e1195bc11406d90d9 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 8325c82b3ebf689ec481b1f452301b5cff4dc0f8..4445be7a59ddede2e1b1bce2f47255ce142e282e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index c491d52d86a238c0f7e0145f95728bc5893735d3..ed2b6c6741094f8a015a058a6e650e51dec67d5f 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 #define LUSTRE_INTENT_H
 
 /* intent IT_XXX are defined in lustre/include/obd.h */
-struct lustre_intent_data {
+
+struct lookup_intent {
+       int             it_op;
+       int             it_create_mode;
+       __u64           it_flags;
        int             it_disposition;
        int             it_status;
        __u64           it_lock_handle;
@@ -46,17 +46,23 @@ struct lustre_intent_data {
        int             it_lock_mode;
        int             it_remote_lock_mode;
        __u64      it_remote_lock_handle;
-       void       *it_data;
+       struct ptlrpc_request *it_request;
        unsigned int    it_lock_set:1;
 };
 
-struct lookup_intent {
-       int     it_op;
-       int     it_create_mode;
-       __u64   it_flags;
-       union {
-               struct lustre_intent_data lustre;
-       } d;
-};
+static inline int it_disposition(struct lookup_intent *it, int flag)
+{
+       return it->it_disposition & flag;
+}
+
+static inline void it_set_disposition(struct lookup_intent *it, int flag)
+{
+       it->it_disposition |= flag;
+}
+
+static inline void it_clear_disposition(struct lookup_intent *it, int flag)
+{
+       it->it_disposition &= ~flag;
+}
 
 #endif
index 00b976766aef5ccb315e3e89473adc0fd28bcfdf..06958f217fc8f8ef28c8d2ae567f6b852f66cfd6 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index fcc5ebbceed8713b5bbddad9872aae9480fd8c2d..b16897702559ad006cc1b82891adf3a006a06b67 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 49618e1868247c28c83fda8fda6928cb84469d0d..b96e02317bfcf65bd1474a04fb6a18e1eea8cc1e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index f267ff8a6ec83b74026a6d76196f8772c6d6bc65..fa62b95d351fd6a67869a437abca8ad3e0a41176 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -189,9 +185,6 @@ struct mdc_cache_waiter {
 };
 
 /* mdc/mdc_locks.c */
-int it_disposition(struct lookup_intent *it, int flag);
-void it_clear_disposition(struct lookup_intent *it, int flag);
-void it_set_disposition(struct lookup_intent *it, int flag);
 int it_open_error(int phase, struct lookup_intent *it);
 
 static inline bool cl_is_lov_delay_create(unsigned int flags)
index 95d27ddecfb393e95655727172c1bc57d6e1c42f..4104bd9bd5c4dcb0f8d2d140a80ead0e42ed8986 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index a7973d5de1683df23d6a243b7c1df7cda51b836c..d5debd615fdf1c198c7b40b1091776577bfb28fb 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 /* Macro to hide a typecast. */
 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)
 
+struct ptlrpc_replay_async_args {
+       int             praa_old_state;
+       int             praa_old_status;
+};
+
 /**
  * Structure to single define portal connection.
  */
@@ -479,8 +480,9 @@ enum rq_phase {
        RQ_PHASE_BULK      = 0xebc0de02,
        RQ_PHASE_INTERPRET      = 0xebc0de03,
        RQ_PHASE_COMPLETE       = 0xebc0de04,
-       RQ_PHASE_UNREGISTERING  = 0xebc0de05,
-       RQ_PHASE_UNDEFINED      = 0xebc0de06
+       RQ_PHASE_UNREG_RPC      = 0xebc0de05,
+       RQ_PHASE_UNREG_BULK     = 0xebc0de06,
+       RQ_PHASE_UNDEFINED      = 0xebc0de07
 };
 
 /** Type of request interpreter call-back */
@@ -1247,22 +1249,103 @@ struct ptlrpc_hpreq_ops {
        void (*hpreq_fini)(struct ptlrpc_request *);
 };
 
-/**
- * Represents remote procedure call.
- *
- * This is a staple structure used by everybody wanting to send a request
- * in Lustre.
- */
-struct ptlrpc_request {
-       /* Request type: one of PTL_RPC_MSG_* */
-       int rq_type;
-       /** Result of request processing */
-       int rq_status;
+struct ptlrpc_cli_req {
+       /** For bulk requests on client only: bulk descriptor */
+       struct ptlrpc_bulk_desc         *cr_bulk;
+       /** optional time limit for send attempts */
+       long                             cr_delay_limit;
+       /** time request was first queued */
+       time_t                           cr_queued_time;
+       /** request sent timeval */
+       struct timespec64                cr_sent_tv;
+       /** time for request really sent out */
+       time_t                           cr_sent_out;
+       /** when req reply unlink must finish. */
+       time_t                           cr_reply_deadline;
+       /** when req bulk unlink must finish. */
+       time_t                           cr_bulk_deadline;
+       /** when req unlink must finish. */
+       time_t                           cr_req_deadline;
+       /** Portal to which this request would be sent */
+       short                            cr_req_ptl;
+       /** Portal where to wait for reply and where reply would be sent */
+       short                            cr_rep_ptl;
+       /** request resending number */
+       unsigned int                     cr_resend_nr;
+       /** What was import generation when this request was sent */
+       int                              cr_imp_gen;
+       enum lustre_imp_state            cr_send_state;
+       /** Per-request waitq introduced by bug 21938 for recovery waiting */
+       wait_queue_head_t                cr_set_waitq;
+       /** Link item for request set lists */
+       struct list_head                 cr_set_chain;
+       /** link to waited ctx */
+       struct list_head                 cr_ctx_chain;
+
+       /** client's half ctx */
+       struct ptlrpc_cli_ctx           *cr_cli_ctx;
+       /** Link back to the request set */
+       struct ptlrpc_request_set       *cr_set;
+       /** outgoing request MD handle */
+       lnet_handle_md_t                 cr_req_md_h;
+       /** request-out callback parameter */
+       struct ptlrpc_cb_id              cr_req_cbid;
+       /** incoming reply MD handle */
+       lnet_handle_md_t                 cr_reply_md_h;
+       wait_queue_head_t                cr_reply_waitq;
+       /** reply callback parameter */
+       struct ptlrpc_cb_id              cr_reply_cbid;
+       /** Async completion handler, called when reply is received */
+       ptlrpc_interpterer_t             cr_reply_interp;
+       /** Async completion context */
+       union ptlrpc_async_args          cr_async_args;
+       /** Opaq data for replay and commit callbacks. */
+       void                            *cr_cb_data;
        /**
-        * Linkage item through which this request is included into
-        * sending/delayed lists on client and into rqbd list on server
+        * Commit callback, called when request is committed and about to be
+        * freed.
         */
-       struct list_head rq_list;
+       void (*cr_commit_cb)(struct ptlrpc_request *);
+       /** Replay callback, called after request is replayed at recovery */
+       void (*cr_replay_cb)(struct ptlrpc_request *);
+};
+
+/** client request member alias */
+/* NB: these alias should NOT be used by any new code, instead they should
+ * be removed step by step to avoid potential abuse
+ */
+#define rq_bulk                        rq_cli.cr_bulk
+#define rq_delay_limit         rq_cli.cr_delay_limit
+#define rq_queued_time         rq_cli.cr_queued_time
+#define rq_sent_tv             rq_cli.cr_sent_tv
+#define rq_real_sent           rq_cli.cr_sent_out
+#define rq_reply_deadline      rq_cli.cr_reply_deadline
+#define rq_bulk_deadline       rq_cli.cr_bulk_deadline
+#define rq_req_deadline                rq_cli.cr_req_deadline
+#define rq_nr_resend           rq_cli.cr_resend_nr
+#define rq_request_portal      rq_cli.cr_req_ptl
+#define rq_reply_portal                rq_cli.cr_rep_ptl
+#define rq_import_generation   rq_cli.cr_imp_gen
+#define rq_send_state          rq_cli.cr_send_state
+#define rq_set_chain           rq_cli.cr_set_chain
+#define rq_ctx_chain           rq_cli.cr_ctx_chain
+#define rq_set                 rq_cli.cr_set
+#define rq_set_waitq           rq_cli.cr_set_waitq
+#define rq_cli_ctx             rq_cli.cr_cli_ctx
+#define rq_req_md_h            rq_cli.cr_req_md_h
+#define rq_req_cbid            rq_cli.cr_req_cbid
+#define rq_reply_md_h          rq_cli.cr_reply_md_h
+#define rq_reply_waitq         rq_cli.cr_reply_waitq
+#define rq_reply_cbid          rq_cli.cr_reply_cbid
+#define rq_interpret_reply     rq_cli.cr_reply_interp
+#define rq_async_args          rq_cli.cr_async_args
+#define rq_cb_data             rq_cli.cr_cb_data
+#define rq_commit_cb           rq_cli.cr_commit_cb
+#define rq_replay_cb           rq_cli.cr_replay_cb
+
+struct ptlrpc_srv_req {
+       /** initial thread servicing this request */
+       struct ptlrpc_thread            *sr_svc_thread;
        /**
         * Server side list of incoming unserved requests sorted by arrival
         * time.  Traversed from time to time to notice about to expire
@@ -1270,32 +1353,86 @@ struct ptlrpc_request {
         * know server is alive and well, just very busy to service their
         * requests in time
         */
-       struct list_head rq_timed_list;
-       /** server-side history, used for debugging purposes. */
-       struct list_head rq_history_list;
+       struct list_head                sr_timed_list;
        /** server-side per-export list */
-       struct list_head rq_exp_list;
-       /** server-side hp handlers */
-       struct ptlrpc_hpreq_ops *rq_ops;
-
-       /** initial thread servicing this request */
-       struct ptlrpc_thread *rq_svc_thread;
-
+       struct list_head                sr_exp_list;
+       /** server-side history, used for debuging purposes. */
+       struct list_head                sr_hist_list;
        /** history sequence # */
-       __u64 rq_history_seq;
+       __u64                           sr_hist_seq;
+       /** the index of service's srv_at_array into which request is linked */
+       time_t                          sr_at_index;
+       /** authed uid */
+       uid_t                           sr_auth_uid;
+       /** authed uid mapped to */
+       uid_t                           sr_auth_mapped_uid;
+       /** RPC is generated from what part of Lustre */
+       enum lustre_sec_part            sr_sp_from;
+       /** request session context */
+       struct lu_context               sr_ses;
        /** \addtogroup  nrs
         * @{
         */
        /** stub for NRS request */
-       struct ptlrpc_nrs_request rq_nrq;
+       struct ptlrpc_nrs_request       sr_nrq;
        /** @} nrs */
-       /** the index of service's srv_at_array into which request is linked */
-       u32 rq_at_index;
+       /** request arrival time */
+       struct timespec64               sr_arrival_time;
+       /** server's half ctx */
+       struct ptlrpc_svc_ctx           *sr_svc_ctx;
+       /** (server side), pointed directly into req buffer */
+       struct ptlrpc_user_desc         *sr_user_desc;
+       /** separated reply state */
+       struct ptlrpc_reply_state       *sr_reply_state;
+       /** server-side hp handlers */
+       struct ptlrpc_hpreq_ops         *sr_ops;
+       /** incoming request buffer */
+       struct ptlrpc_request_buffer_desc *sr_rqbd;
+};
+
+/** server request member alias */
+/* NB: these alias should NOT be used by any new code, instead they should
+ * be removed step by step to avoid potential abuse
+ */
+#define rq_svc_thread          rq_srv.sr_svc_thread
+#define rq_timed_list          rq_srv.sr_timed_list
+#define rq_exp_list            rq_srv.sr_exp_list
+#define rq_history_list                rq_srv.sr_hist_list
+#define rq_history_seq         rq_srv.sr_hist_seq
+#define rq_at_index            rq_srv.sr_at_index
+#define rq_auth_uid            rq_srv.sr_auth_uid
+#define rq_auth_mapped_uid     rq_srv.sr_auth_mapped_uid
+#define rq_sp_from             rq_srv.sr_sp_from
+#define rq_session             rq_srv.sr_ses
+#define rq_nrq                 rq_srv.sr_nrq
+#define rq_arrival_time                rq_srv.sr_arrival_time
+#define rq_reply_state         rq_srv.sr_reply_state
+#define rq_svc_ctx             rq_srv.sr_svc_ctx
+#define rq_user_desc           rq_srv.sr_user_desc
+#define rq_ops                 rq_srv.sr_ops
+#define rq_rqbd                        rq_srv.sr_rqbd
+
+/**
+ * Represents remote procedure call.
+ *
+ * This is a staple structure used by everybody wanting to send a request
+ * in Lustre.
+ */
+struct ptlrpc_request {
+       /* Request type: one of PTL_RPC_MSG_* */
+       int                              rq_type;
+       /** Result of request processing */
+       int                              rq_status;
+       /**
+        * Linkage item through which this request is included into
+        * sending/delayed lists on client and into rqbd list on server
+        */
+       struct list_head                 rq_list;
        /** Lock to protect request flags and some other important bits, like
         * rq_list
         */
        spinlock_t rq_lock;
-       /** client-side flags are serialized by rq_lock */
+       /** client-side flags are serialized by rq_lock @{ */
        unsigned int rq_intr:1, rq_replied:1, rq_err:1,
                rq_timedout:1, rq_resend:1, rq_restart:1,
                /**
@@ -1311,18 +1448,15 @@ struct ptlrpc_request {
                rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
                rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
                rq_early:1,
-               rq_req_unlink:1, rq_reply_unlink:1,
+               rq_req_unlinked:1,      /* unlinked request buffer from lnet */
+               rq_reply_unlinked:1,    /* unlinked reply buffer from lnet */
                rq_memalloc:1,      /* req originated from "kswapd" */
-               /* server-side flags */
-               rq_packed_final:1,  /* packed final reply */
-               rq_hp:1,            /* high priority RPC */
-               rq_at_linked:1,     /* link into service's srv_at_array */
-               rq_reply_truncate:1,
                rq_committed:1,
-               /* whether the "rq_set" is a valid one */
+               rq_reply_truncated:1,
+               /** whether the "rq_set" is a valid one */
                rq_invalid_rqset:1,
                rq_generation_set:1,
-               /* do not resend request on -EINPROGRESS */
+               /** do not resend request on -EINPROGRESS */
                rq_no_retry_einprogress:1,
                /* allow the req to be sent if the import is in recovery
                 * status
@@ -1330,20 +1464,24 @@ struct ptlrpc_request {
                rq_allow_replay:1,
                /* bulk request, sent to server, but uncommitted */
                rq_unstable:1;
+       /** @} */
 
-       unsigned int rq_nr_resend;
-
-       enum rq_phase rq_phase; /* one of RQ_PHASE_* */
-       enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
-       atomic_t rq_refcount; /* client-side refcount for SENT race,
-                              * server-side refcount for multiple replies
-                              */
-
-       /** Portal to which this request would be sent */
-       short rq_request_portal;  /* XXX FIXME bug 249 */
-       /** Portal where to wait for reply and where reply would be sent */
-       short rq_reply_portal;    /* XXX FIXME bug 249 */
+       /** server-side flags @{ */
+       unsigned int
+               rq_hp:1,                /**< high priority RPC */
+               rq_at_linked:1,         /**< link into service's srv_at_array */
+               rq_packed_final:1;      /**< packed final reply */
+       /** @} */
 
+       /** one of RQ_PHASE_* */
+       enum rq_phase                   rq_phase;
+       /** one of RQ_PHASE_* to be used next */
+       enum rq_phase                   rq_next_phase;
+       /**
+        * client-side refcount for SENT race, server-side refcount
+        * for multiple replies
+        */
+       atomic_t                        rq_refcount;
        /**
         * client-side:
         * !rq_truncate : # reply bytes actually received,
@@ -1354,6 +1492,8 @@ struct ptlrpc_request {
        int rq_reqlen;
        /** Reply length */
        int rq_replen;
+       /** Pool if request is from preallocated list */
+       struct ptlrpc_request_pool     *rq_pool;
        /** Request message - what client sent */
        struct lustre_msg *rq_reqmsg;
        /** Reply message - server response */
@@ -1366,19 +1506,20 @@ struct ptlrpc_request {
         * List item to for replay list. Not yet committed requests get linked
         * there.
         * Also see \a rq_replay comment above.
+        * It's also link chain on obd_export::exp_req_replay_queue
         */
        struct list_head rq_replay_list;
-
+       /** non-shared members for client & server request*/
+       union {
+               struct ptlrpc_cli_req    rq_cli;
+               struct ptlrpc_srv_req    rq_srv;
+       };
        /**
         * security and encryption data
         * @{
         */
-       struct ptlrpc_cli_ctx   *rq_cli_ctx;     /**< client's half ctx */
-       struct ptlrpc_svc_ctx   *rq_svc_ctx;     /**< server's half ctx */
-       struct list_head               rq_ctx_chain;   /**< link to waited ctx */
-
-       struct sptlrpc_flavor    rq_flvr;       /**< for client & server */
-       enum lustre_sec_part     rq_sp_from;
+       /** description of flavors for client & server */
+       struct sptlrpc_flavor           rq_flvr;
 
        /* client/server security flags */
        unsigned int
@@ -1388,7 +1529,6 @@ struct ptlrpc_request {
                                 rq_bulk_write:1,    /* request bulk write */
                                 /* server authentication flags */
                                 rq_auth_gss:1,      /* authenticated by gss */
-                                rq_auth_remote:1,   /* authed as remote user */
                                 rq_auth_usr_root:1, /* authed as root */
                                 rq_auth_usr_mdt:1,  /* authed as mdt */
                                 rq_auth_usr_ost:1,  /* authed as ost */
@@ -1397,19 +1537,15 @@ struct ptlrpc_request {
                                 rq_pack_bulk:1,
                                 /* doesn't expect reply FIXME */
                                 rq_no_reply:1,
-                                rq_pill_init:1;     /* pill initialized */
-
-       uid_t               rq_auth_uid;        /* authed uid */
-       uid_t               rq_auth_mapped_uid; /* authed uid mapped to */
-
-       /* (server side), pointed directly into req buffer */
-       struct ptlrpc_user_desc *rq_user_desc;
-
-       /* various buffer pointers */
-       struct lustre_msg       *rq_reqbuf;      /* req wrapper */
-       char                *rq_repbuf;      /* rep buffer */
-       struct lustre_msg       *rq_repdata;     /* rep wrapper msg */
-       struct lustre_msg       *rq_clrbuf;      /* only in priv mode */
+                                rq_pill_init:1, /* pill initialized */
+                                rq_srv_req:1; /* server request */
+
+       /** various buffer pointers */
+       struct lustre_msg       *rq_reqbuf;     /**< req wrapper */
+       char                    *rq_repbuf;     /**< rep buffer */
+       struct lustre_msg       *rq_repdata;    /**< rep wrapper msg */
+       /** only in priv mode */
+       struct lustre_msg       *rq_clrbuf;
        int                   rq_reqbuf_len;  /* req wrapper buf len */
        int                   rq_reqdata_len; /* req wrapper msg len */
        int                   rq_repbuf_len;  /* rep buffer len */
@@ -1426,97 +1562,28 @@ struct ptlrpc_request {
        __u32 rq_req_swab_mask;
        __u32 rq_rep_swab_mask;
 
-       /** What was import generation when this request was sent */
-       int rq_import_generation;
-       enum lustre_imp_state rq_send_state;
-
        /** how many early replies (for stats) */
        int rq_early_count;
 
-       /** client+server request */
-       lnet_handle_md_t     rq_req_md_h;
-       struct ptlrpc_cb_id  rq_req_cbid;
-       /** optional time limit for send attempts */
-       long       rq_delay_limit;
-       /** time request was first queued */
-       unsigned long      rq_queued_time;
-
-       /* server-side... */
-       /** request arrival time */
-       struct timespec64       rq_arrival_time;
-       /** separated reply state */
-       struct ptlrpc_reply_state *rq_reply_state;
-       /** incoming request buffer */
-       struct ptlrpc_request_buffer_desc *rq_rqbd;
-
-       /** client-only incoming reply */
-       lnet_handle_md_t     rq_reply_md_h;
-       wait_queue_head_t         rq_reply_waitq;
-       struct ptlrpc_cb_id  rq_reply_cbid;
-
+       /** Server-side, export on which request was received */
+       struct obd_export               *rq_export;
+       /** import where request is being sent */
+       struct obd_import               *rq_import;
        /** our LNet NID */
        lnet_nid_t         rq_self;
        /** Peer description (the other side) */
        lnet_process_id_t    rq_peer;
-       /** Server-side, export on which request was received */
-       struct obd_export   *rq_export;
-       /** Client side, import where request is being sent */
-       struct obd_import   *rq_import;
-
-       /** Replay callback, called after request is replayed at recovery */
-       void (*rq_replay_cb)(struct ptlrpc_request *);
        /**
-        * Commit callback, called when request is committed and about to be
-        * freed.
+        * service time estimate (secs)
+        * If the request is not served by this time, it is marked as timed out.
         */
-       void (*rq_commit_cb)(struct ptlrpc_request *);
-       /** Opaq data for replay and commit callbacks. */
-       void  *rq_cb_data;
-
-       /** For bulk requests on client only: bulk descriptor */
-       struct ptlrpc_bulk_desc *rq_bulk;
-
-       /** client outgoing req */
+       int                     rq_timeout;
        /**
         * when request/reply sent (secs), or time when request should be sent
         */
        time64_t rq_sent;
-       /** time for request really sent out */
-       time64_t rq_real_sent;
-
-       /** when request must finish. volatile
-        * so that servers' early reply updates to the deadline aren't
-        * kept in per-cpu cache
-        */
-       volatile time64_t rq_deadline;
-       /** when req reply unlink must finish. */
-       time64_t rq_reply_deadline;
-       /** when req bulk unlink must finish. */
-       time64_t rq_bulk_deadline;
-       /**
-        * service time estimate (secs)
-        * If the requestsis not served by this time, it is marked as timed out.
-        */
-       int    rq_timeout;
-
-       /** Multi-rpc bits */
-       /** Per-request waitq introduced by bug 21938 for recovery waiting */
-       wait_queue_head_t rq_set_waitq;
-       /** Link item for request set lists */
-       struct list_head  rq_set_chain;
-       /** Link back to the request set */
-       struct ptlrpc_request_set *rq_set;
-       /** Async completion handler, called when reply is received */
-       ptlrpc_interpterer_t rq_interpret_reply;
-       /** Async completion context */
-       union ptlrpc_async_args rq_async_args;
-
-       /** Pool if request is from preallocated list */
-       struct ptlrpc_request_pool *rq_pool;
-
-       struct lu_context          rq_session;
-       struct lu_context          rq_recov_session;
-
+       /** when request must finish. */
+       time64_t                  rq_deadline;
        /** request format description */
        struct req_capsule        rq_pill;
 };
@@ -1629,8 +1696,10 @@ ptlrpc_phase2str(enum rq_phase phase)
                return "Interpret";
        case RQ_PHASE_COMPLETE:
                return "Complete";
-       case RQ_PHASE_UNREGISTERING:
-               return "Unregistering";
+       case RQ_PHASE_UNREG_RPC:
+               return "UnregRPC";
+       case RQ_PHASE_UNREG_BULK:
+               return "UnregBULK";
        default:
                return "?Phase?";
        }
@@ -1657,7 +1726,7 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req)
 #define DEBUG_REQ_FLAGS(req)                                               \
        ptlrpc_rqphase2str(req),                                                \
        FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"),                \
-       FLAG(req->rq_err, "E"),                                          \
+       FLAG(req->rq_err, "E"), FLAG(req->rq_net_err, "e"),                 \
        FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"),   \
        FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"),            \
        FLAG(req->rq_no_resend, "N"),                                      \
@@ -1665,7 +1734,7 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req)
        FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"),                  \
        FLAG(req->rq_committed, "M")
 
-#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s"
+#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s"
 
 void _debug_req(struct ptlrpc_request *req,
                struct libcfs_debug_msg_data *data, const char *fmt, ...)
@@ -2316,8 +2385,7 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
 
        desc = req->rq_bulk;
 
-       if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
-           req->rq_bulk_deadline > ktime_get_real_seconds())
+       if (req->rq_bulk_deadline > ktime_get_real_seconds())
                return 1;
 
        if (!desc)
@@ -2664,13 +2732,20 @@ ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
        if (req->rq_phase == new_phase)
                return;
 
-       if (new_phase == RQ_PHASE_UNREGISTERING) {
+       if (new_phase == RQ_PHASE_UNREG_RPC ||
+           new_phase == RQ_PHASE_UNREG_BULK) {
+               /* No embedded unregistering phases */
+               if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+                   req->rq_phase == RQ_PHASE_UNREG_BULK)
+                       return;
+
                req->rq_next_phase = req->rq_phase;
                if (req->rq_import)
                        atomic_inc(&req->rq_import->imp_unregistering);
        }
 
-       if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+       if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+           req->rq_phase == RQ_PHASE_UNREG_BULK) {
                if (req->rq_import)
                        atomic_dec(&req->rq_import->imp_unregistering);
        }
@@ -2687,9 +2762,6 @@ ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
 static inline int
 ptlrpc_client_early(struct ptlrpc_request *req)
 {
-       if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
-           req->rq_reply_deadline > ktime_get_real_seconds())
-               return 0;
        return req->rq_early;
 }
 
@@ -2699,8 +2771,7 @@ ptlrpc_client_early(struct ptlrpc_request *req)
 static inline int
 ptlrpc_client_replied(struct ptlrpc_request *req)
 {
-       if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
-           req->rq_reply_deadline > ktime_get_real_seconds())
+       if (req->rq_reply_deadline > ktime_get_real_seconds())
                return 0;
        return req->rq_replied;
 }
@@ -2709,8 +2780,7 @@ ptlrpc_client_replied(struct ptlrpc_request *req)
 static inline int
 ptlrpc_client_recv(struct ptlrpc_request *req)
 {
-       if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
-           req->rq_reply_deadline > ktime_get_real_seconds())
+       if (req->rq_reply_deadline > ktime_get_real_seconds())
                return 1;
        return req->rq_receiving_reply;
 }
@@ -2721,13 +2791,16 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
        int rc;
 
        spin_lock(&req->rq_lock);
-       if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
-           req->rq_reply_deadline > ktime_get_real_seconds()) {
+       if (req->rq_reply_deadline > ktime_get_real_seconds()) {
+               spin_unlock(&req->rq_lock);
+               return 1;
+       }
+       if (req->rq_req_deadline > ktime_get_real_seconds()) {
                spin_unlock(&req->rq_lock);
                return 1;
        }
-       rc = req->rq_receiving_reply;
-       rc = rc || req->rq_req_unlink || req->rq_reply_unlink;
+       rc = !req->rq_req_unlinked || !req->rq_reply_unlinked ||
+            req->rq_receiving_reply;
        spin_unlock(&req->rq_lock);
        return rc;
 }
index a42cf90c1cd8d8b401da13c9a756dcf23cd22a1b..82aadd32c2b8bc96abe653b0845b93cc497a6a8a 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 0aac4391ea16e49af1d6d3c2d977b1ba5f56a24f..544a43c862b9185c18179e22f550b2460776d015 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -164,7 +160,7 @@ extern struct req_format RQF_MDS_IS_SUBDIR;
 extern struct req_format RQF_MDS_DONE_WRITING;
 extern struct req_format RQF_MDS_REINT;
 extern struct req_format RQF_MDS_REINT_CREATE;
-extern struct req_format RQF_MDS_REINT_CREATE_RMT_ACL;
+extern struct req_format RQF_MDS_REINT_CREATE_ACL;
 extern struct req_format RQF_MDS_REINT_CREATE_SLAVE;
 extern struct req_format RQF_MDS_REINT_CREATE_SYM;
 extern struct req_format RQF_MDS_REINT_OPEN;
index 01b4e6726a6815add6dc66628e0637e66d77f399..90c183424802499cc27f651ae955f526aa2aad07 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -221,13 +217,13 @@ enum sptlrpc_bulk_service {
 
 #define SPTLRPC_FLVR_DEFAULT       SPTLRPC_FLVR_NULL
 
-#define SPTLRPC_FLVR_INVALID       ((__u32) 0xFFFFFFFF)
-#define SPTLRPC_FLVR_ANY               ((__u32) 0xFFF00000)
+#define SPTLRPC_FLVR_INVALID       ((__u32)0xFFFFFFFF)
+#define SPTLRPC_FLVR_ANY               ((__u32)0xFFF00000)
 
 /**
  * extract the useful part from wire flavor
  */
-#define WIRE_FLVR(wflvr)               (((__u32) (wflvr)) & 0x000FFFFF)
+#define WIRE_FLVR(wflvr)               (((__u32)(wflvr)) & 0x000FFFFF)
 
 /** @} flavor */
 
index 2d926e0ee64725ffdb37b8826a9b20ea0fd1ffe0..a1bc2c478ff9a1d49a73f966f61ad667a1823d0a 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -232,6 +228,12 @@ enum {
 #define MDC_MAX_RIF_DEFAULT       8
 #define MDC_MAX_RIF_MAX         512
 
+enum obd_cl_sem_lock_class {
+       OBD_CLI_SEM_NORMAL,
+       OBD_CLI_SEM_MGC,
+       OBD_CLI_SEM_MDCOSC,
+};
+
 struct mdc_rpc_lock;
 struct obd_import;
 struct client_obd {
@@ -419,7 +421,7 @@ struct lov_obd {
        enum lustre_sec_part    lov_sp_me;
 
        /* Cached LRU and unstable data from upper layer */
-       void                   *lov_cache;
+       struct cl_client_cache *lov_cache;
 
        struct rw_semaphore     lov_notify_lock;
 
@@ -1119,9 +1121,6 @@ struct md_ops {
                             ldlm_policy_data_t *, enum ldlm_mode,
                             enum ldlm_cancel_flags flags, void *opaque);
 
-       int (*get_remote_perm)(struct obd_export *, const struct lu_fid *,
-                              __u32, struct ptlrpc_request **);
-
        int (*intent_getattr_async)(struct obd_export *,
                                    struct md_enqueue_info *,
                                    struct ldlm_enqueue_info *);
index f6c18df906a88b5fdc63b896078a659c7f3079d7..a8a81e662a56f2ebfba29248988d97a380c96d64 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 32863bcb30b948f327e0ef1c555a7bde4ab24579..6482a937000bc87c4c052927040f0bcd7e659c7a 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -1654,16 +1650,6 @@ static inline int md_init_ea_size(struct obd_export *exp, int easize,
                                               cookiesize, def_cookiesize);
 }
 
-static inline int md_get_remote_perm(struct obd_export *exp,
-                                    const struct lu_fid *fid, __u32 suppgid,
-                                    struct ptlrpc_request **request)
-{
-       EXP_CHECK_MD_OP(exp, get_remote_perm);
-       EXP_MD_COUNTER_INCREMENT(exp, get_remote_perm);
-       return MDP(exp->exp_obd, get_remote_perm)(exp, fid, suppgid,
-                                                 request);
-}
-
 static inline int md_intent_getattr_async(struct obd_export *exp,
                                          struct md_enqueue_info *minfo,
                                          struct ldlm_enqueue_info *einfo)
index 60034d39b00d4d57c9907139ddb1f9351058c003..845e64a56c21b9a7532e9d5158a3f3a37b7913b8 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -368,6 +364,9 @@ extern char obd_jobid_var[];
 #define OBD_FAIL_PTLRPC_CLIENT_BULK_CB2  0x515
 #define OBD_FAIL_PTLRPC_DELAY_IMP_FULL   0x516
 #define OBD_FAIL_PTLRPC_CANCEL_RESEND    0x517
+#define OBD_FAIL_PTLRPC_DROP_BULK       0x51a
+#define OBD_FAIL_PTLRPC_LONG_REQ_UNLINK         0x51b
+#define OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK 0x51c
 
 #define OBD_FAIL_OBD_PING_NET      0x600
 #define OBD_FAIL_OBD_LOG_CANCEL_NET      0x601
index 323060626fdfcb7ec845a1b50b76cbb3c55b08ab..f4a70ebddeaf689d9fdf5aabca64bc9d652fb81e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 621323f6ee60f0549054de5a4f535dbcf01af6d6..ea8840cb90562bc7d8240167c9db90c43bb89273 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index cf1f1783632fe23c2e91cb980d68ecbe673941fc..f5023d9b78f5d833de8602d64b937aa8b8172233 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 349bfcc9b33128418958e32655babc50a33757b5..d6b61bc391357191f1a35fe5ade6ae3309481b84 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -259,14 +255,13 @@ reprocess:
                         * overflow and underflow.
                         */
                        if ((new->l_policy_data.l_flock.start >
-                            (lock->l_policy_data.l_flock.end + 1))
-                           && (lock->l_policy_data.l_flock.end !=
-                               OBD_OBJECT_EOF))
+                            (lock->l_policy_data.l_flock.end + 1)) &&
+                           (lock->l_policy_data.l_flock.end != OBD_OBJECT_EOF))
                                continue;
 
                        if ((new->l_policy_data.l_flock.end <
-                            (lock->l_policy_data.l_flock.start - 1))
-                           && (lock->l_policy_data.l_flock.start != 0))
+                            (lock->l_policy_data.l_flock.start - 1)) &&
+                           (lock->l_policy_data.l_flock.start != 0))
                                break;
 
                        if (new->l_policy_data.l_flock.start <
index b1bed1e17d3244782968fdce2a6fe3d76c23f50f..79f4e6fa193e1d0b1d34b9953a52d16276ec52f2 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 32f227f3779961befc50e7e926755a36cd4ad45f..e4cf65d2d3b1c3076fbeab10bba06799246134ed 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index b4ffbe2fc4eda02032ae4baafc799a173dd180ab..7c832aae7d5ed0d18de59b451826d28381a5b23c 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -345,7 +341,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
         * Set cl_chksum* to CRC32 for now to avoid returning screwed info
         * through procfs.
         */
-       cli->cl_cksum_type = cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
+       cli->cl_cksum_type = OBD_CKSUM_CRC32;
+       cli->cl_supp_cksum_types = OBD_CKSUM_CRC32;
        atomic_set(&cli->cl_resends, OSC_DEFAULT_RESENDS);
 
        /* This value may be reduced at connect time in
index bff94ea12d6f8ca983d67f0f8b02b2826943c985..a5993f745ebe49d06354ab3688fe9c8c01982b5a 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -662,7 +658,7 @@ static void ldlm_add_ast_work_item(struct ldlm_lock *lock,
  * r/w reference type is determined by \a mode
  * Calls ldlm_lock_addref_internal.
  */
-void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_addref(const struct lustre_handle *lockh, __u32 mode)
 {
        struct ldlm_lock *lock;
 
@@ -704,7 +700,7 @@ void ldlm_lock_addref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
  *
  * \retval -EAGAIN lock is being canceled.
  */
-int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
+int ldlm_lock_addref_try(const struct lustre_handle *lockh, __u32 mode)
 {
        struct ldlm_lock *lock;
        int            result;
@@ -836,7 +832,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
 /**
  * Decrease reader/writer refcount for LDLM lock with handle \a lockh
  */
-void ldlm_lock_decref(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref(const struct lustre_handle *lockh, __u32 mode)
 {
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
 
@@ -853,7 +849,7 @@ EXPORT_SYMBOL(ldlm_lock_decref);
  *
  * Typical usage is for GROUP locks which we cannot allow to be cached.
  */
-void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
+void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode)
 {
        struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
 
@@ -1322,7 +1318,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
 }
 EXPORT_SYMBOL(ldlm_lock_match);
 
-enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
+enum ldlm_mode ldlm_revalidate_lock_handle(const struct lustre_handle *lockh,
                                           __u64 *bits)
 {
        struct ldlm_lock *lock;
@@ -1444,7 +1440,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
                memcpy(data, lvb, size);
                break;
        default:
-               LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type);
+               LDLM_ERROR(lock, "Unknown LVB type: %d", lock->l_lvb_type);
                dump_stack();
                return -EINVAL;
        }
@@ -1853,7 +1849,7 @@ EXPORT_SYMBOL(ldlm_lock_cancel);
 /**
  * Set opaque data into the lock that only makes sense to upper layer.
  */
-int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
+int ldlm_lock_set_data(const struct lustre_handle *lockh, void *data)
 {
        struct ldlm_lock *lock = ldlm_handle2lock(lockh);
        int rc = -EINVAL;
@@ -1879,7 +1875,7 @@ struct export_cl_data {
  *
  * Used when printing all locks on a resource for debug purposes.
  */
-void ldlm_lock_dump_handle(int level, struct lustre_handle *lockh)
+void ldlm_lock_dump_handle(int level, const struct lustre_handle *lockh)
 {
        struct ldlm_lock *lock;
 
index ab739f079a4872c437e777997eb42166e410c90e..821939ff2e6bb6208cf25af1155d97636496e7af 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -503,7 +499,7 @@ static int ldlm_handle_setinfo(struct ptlrpc_request *req)
 
 static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
                                        const char *msg, int rc,
-                                       struct lustre_handle *handle)
+                                       const struct lustre_handle *handle)
 {
        DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
                  "%s: [nid %s] [rc %d] [lock %#llx]",
@@ -641,7 +637,8 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
                 */
                if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
                    ldlm_is_failed(lock)) {
-                       LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
+                       LDLM_DEBUG(lock,
+                                  "callback on lock %#llx - lock disappeared",
                                   dlm_req->lock_handle[0].cookie);
                        unlock_res_and_lock(lock);
                        LDLM_LOCK_RELEASE(lock);
@@ -1011,9 +1008,11 @@ static int ldlm_setup(void)
                blp->blp_min_threads = LDLM_NTHRS_INIT;
                blp->blp_max_threads = LDLM_NTHRS_MAX;
        } else {
-               blp->blp_min_threads = blp->blp_max_threads =
-                       min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
-                                                        ldlm_num_threads));
+               blp->blp_min_threads = min_t(int, LDLM_NTHRS_MAX,
+                                            max_t(int, LDLM_NTHRS_INIT,
+                                                  ldlm_num_threads));
+
+               blp->blp_max_threads = blp->blp_min_threads;
        }
 
        for (i = 0; i < blp->blp_min_threads; i++) {
index 0c1965ddabb9e15bbd8615eb55af3c2f0c9db861..0aed39c46154500dcf2059d073fc79694e910812 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index b913ba9cf97c5327dabfb5289c3c281f11c2e647..657ed4012776b0f6aa38f4dbb44350dae6238541 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 107314e284a02edd6f719977c9b5959fc483a5fb..af487f9937f4636b060ecdeda0913fc3b08be21c 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -340,7 +336,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          enum ldlm_type type, __u8 with_policy,
                          enum ldlm_mode mode,
                          __u64 *flags, void *lvb, __u32 lvb_len,
-                         struct lustre_handle *lockh, int rc)
+                         const struct lustre_handle *lockh, int rc)
 {
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        int is_replay = *flags & LDLM_FL_REPLAY;
@@ -715,7 +711,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
 
                        lock->l_req_extent = policy->l_extent;
                }
-               LDLM_DEBUG(lock, "client-side enqueue START, flags %llx\n",
+               LDLM_DEBUG(lock, "client-side enqueue START, flags %llx",
                           *flags);
        }
 
@@ -1027,7 +1023,7 @@ EXPORT_SYMBOL(ldlm_cli_update_pool);
  *
  * Lock must not have any readers or writers by this time.
  */
-int ldlm_cli_cancel(struct lustre_handle *lockh,
+int ldlm_cli_cancel(const struct lustre_handle *lockh,
                    enum ldlm_cancel_flags cancel_flags)
 {
        struct obd_export *exp;
index e99c89c34cd0148e37f067308be1e33de799a2f2..51a28d96af39197109289f247450efb96d4d6a52 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -1279,7 +1275,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
 {
        check_res_locked(res);
 
-       LDLM_DEBUG(lock, "About to add this lock:\n");
+       LDLM_DEBUG(lock, "About to add this lock:");
 
        if (ldlm_is_destroyed(lock)) {
                CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
index 2ce10ff01b808ef4c039318dc76bfe27bfba9199..2cbb1b80bd4151fac986c495b23e72553bbe7760 100644 (file)
@@ -1,11 +1,7 @@
 obj-$(CONFIG_LUSTRE_FS) += lustre.o
-obj-$(CONFIG_LUSTRE_LLITE_LLOOP) += llite_lloop.o
 lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
            rw.o namei.o symlink.o llite_mmap.o \
-           xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o \
-           rw26.o super25.o statahead.o \
+           xattr.o xattr_cache.o rw26.o super25.o statahead.o \
            glimpse.o lcommon_cl.o lcommon_misc.o \
            vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
            lproc_llite.o
-
-llite_lloop-y := lloop.o
index 1b6f82a1a4359627d313e3fa967451f054857c67..581a63a0a63e57cf86d1b48cc61a31632b535a42 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -206,27 +202,27 @@ int ll_d_init(struct dentry *de)
 
 void ll_intent_drop_lock(struct lookup_intent *it)
 {
-       if (it->it_op && it->d.lustre.it_lock_mode) {
+       if (it->it_op && it->it_lock_mode) {
                struct lustre_handle handle;
 
-               handle.cookie = it->d.lustre.it_lock_handle;
+               handle.cookie = it->it_lock_handle;
 
                CDEBUG(D_DLMTRACE, "releasing lock with cookie %#llx from it %p\n",
                       handle.cookie, it);
-               ldlm_lock_decref(&handle, it->d.lustre.it_lock_mode);
+               ldlm_lock_decref(&handle, it->it_lock_mode);
 
                /* bug 494: intent_release may be called multiple times, from
                 * this thread and we don't want to double-decref this lock
                 */
-               it->d.lustre.it_lock_mode = 0;
-               if (it->d.lustre.it_remote_lock_mode != 0) {
-                       handle.cookie = it->d.lustre.it_remote_lock_handle;
+               it->it_lock_mode = 0;
+               if (it->it_remote_lock_mode != 0) {
+                       handle.cookie = it->it_remote_lock_handle;
 
                        CDEBUG(D_DLMTRACE, "releasing remote lock with cookie%#llx from it %p\n",
                               handle.cookie, it);
                        ldlm_lock_decref(&handle,
-                                        it->d.lustre.it_remote_lock_mode);
-                       it->d.lustre.it_remote_lock_mode = 0;
+                                        it->it_remote_lock_mode);
+                       it->it_remote_lock_mode = 0;
                }
        }
 }
@@ -237,13 +233,13 @@ void ll_intent_release(struct lookup_intent *it)
        ll_intent_drop_lock(it);
        /* We are still holding extra reference on a request, need to free it */
        if (it_disposition(it, DISP_ENQ_OPEN_REF))
-               ptlrpc_req_finished(it->d.lustre.it_data); /* ll_file_open */
+               ptlrpc_req_finished(it->it_request); /* ll_file_open */
 
        if (it_disposition(it, DISP_ENQ_CREATE_REF)) /* create rec */
-               ptlrpc_req_finished(it->d.lustre.it_data);
+               ptlrpc_req_finished(it->it_request);
 
-       it->d.lustre.it_disposition = 0;
-       it->d.lustre.it_data = NULL;
+       it->it_disposition = 0;
+       it->it_request = NULL;
 }
 
 void ll_invalidate_aliases(struct inode *inode)
@@ -253,7 +249,7 @@ void ll_invalidate_aliases(struct inode *inode)
        CDEBUG(D_INODE, "marking dentries for ino "DFID"(%p) invalid\n",
               PFID(ll_inode2fid(inode)), inode);
 
-       ll_lock_dcache(inode);
+       spin_lock(&inode->i_lock);
        hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
                CDEBUG(D_DENTRY, "dentry in drop %pd (%p) parent %p inode %p flags %d\n",
                       dentry, dentry, dentry->d_parent,
@@ -261,7 +257,7 @@ void ll_invalidate_aliases(struct inode *inode)
 
                d_lustre_invalidate(dentry, 0);
        }
-       ll_unlock_dcache(inode);
+       spin_unlock(&inode->i_lock);
 }
 
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
@@ -283,7 +279,7 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
 
 void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
 {
-       if (it->d.lustre.it_lock_mode && inode) {
+       if (it->it_lock_mode && inode) {
                struct ll_sb_info *sbi = ll_i2sbi(inode);
 
                CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)\n",
@@ -306,6 +302,17 @@ static int ll_revalidate_dentry(struct dentry *dentry,
 {
        struct inode *dir = d_inode(dentry->d_parent);
 
+       /* If this is intermediate component path lookup and we were able to get
+        * to this dentry, then its lock has not been revoked and the
+        * path component is valid.
+        */
+       if (lookup_flags & LOOKUP_PARENT)
+               return 1;
+
+       /* Symlink - always valid as long as the dentry was found */
+       if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))
+               return 1;
+
        /*
         * if open&create is set, talk to MDS to make sure file is created if
         * necessary, because we can't do this in ->open() later since that's
index 4b00d1ac84fb7b721495ba53f3c11df5f3bf3521..5b381779c82725c52c5308b81fc0e227a9124756 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -366,7 +362,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
 
                ll_finish_md_op_data(op_data);
 
-               request = (struct ptlrpc_request *)it.d.lustre.it_data;
+               request = (struct ptlrpc_request *)it.it_request;
                if (request)
                        ptlrpc_req_finished(request);
                if (rc < 0) {
@@ -378,7 +374,7 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
                CDEBUG(D_INODE, "setting lr_lvb_inode to inode "DFID"(%p)\n",
                       PFID(ll_inode2fid(dir)), dir);
                md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
-                                &it.d.lustre.it_lock_handle, dir, NULL);
+                                &it.it_lock_handle, dir, NULL);
        } else {
                /* for cross-ref object, l_ast_data of the lock may not be set,
                 * we reset it here
@@ -1076,17 +1072,11 @@ static int copy_and_ioctl(int cmd, struct obd_export *exp,
        void *copy;
        int rc;
 
-       copy = kzalloc(size, GFP_NOFS);
-       if (!copy)
-               return -ENOMEM;
-
-       if (copy_from_user(copy, data, size)) {
-               rc = -EFAULT;
-               goto out;
-       }
+       copy = memdup_user(data, size);
+       if (IS_ERR(copy))
+               return PTR_ERR(copy);
 
        rc = obd_iocontrol(cmd, exp, size, copy, NULL);
-out:
        kfree(copy);
 
        return rc;
@@ -1107,8 +1097,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
        case Q_QUOTAOFF:
        case Q_SETQUOTA:
        case Q_SETINFO:
-               if (!capable(CFS_CAP_SYS_ADMIN) ||
-                   sbi->ll_flags & LL_SBI_RMT_CLIENT)
+               if (!capable(CFS_CAP_SYS_ADMIN))
                        return -EPERM;
                break;
        case Q_GETQUOTA:
@@ -1116,8 +1105,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
                      !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
                     (type == GRPQUOTA &&
                      !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
-                   (!capable(CFS_CAP_SYS_ADMIN) ||
-                    sbi->ll_flags & LL_SBI_RMT_CLIENT))
+                     !capable(CFS_CAP_SYS_ADMIN))
                        return -EPERM;
                break;
        case Q_GETINFO:
@@ -1128,9 +1116,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
        }
 
        if (valid != QC_GENERAL) {
-               if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
-                       return -EOPNOTSUPP;
-
                if (cmd == Q_GETINFO)
                        qctl->qc_cmd = Q_GETOINFO;
                else if (cmd == Q_GETQUOTA)
@@ -1538,7 +1523,9 @@ skip_lmm:
                        st.st_atime   = body->atime;
                        st.st_mtime   = body->mtime;
                        st.st_ctime   = body->ctime;
-                       st.st_ino     = inode->i_ino;
+                       st.st_ino     = cl_fid_build_ino(&body->fid1,
+                                                        sbi->ll_flags &
+                                                        LL_SBI_32BIT_API);
 
                        lmdp = (struct lov_user_mds_data __user *)arg;
                        if (copy_to_user(&lmdp->lmd_st, &st, sizeof(st))) {
@@ -1631,8 +1618,7 @@ free_lmm:
                struct obd_quotactl *oqctl;
                int error = 0;
 
-               if (!capable(CFS_CAP_SYS_ADMIN) ||
-                   sbi->ll_flags & LL_SBI_RMT_CLIENT)
+               if (!capable(CFS_CAP_SYS_ADMIN))
                        return -EPERM;
 
                oqctl = kzalloc(sizeof(*oqctl), GFP_NOFS);
@@ -1655,8 +1641,7 @@ free_lmm:
        case OBD_IOC_POLL_QUOTACHECK: {
                struct if_quotacheck *check;
 
-               if (!capable(CFS_CAP_SYS_ADMIN) ||
-                   sbi->ll_flags & LL_SBI_RMT_CLIENT)
+               if (!capable(CFS_CAP_SYS_ADMIN))
                        return -EPERM;
 
                check = kzalloc(sizeof(*check), GFP_NOFS);
@@ -1713,20 +1698,6 @@ out_quotactl:
                return ll_get_obd_name(inode, cmd, arg);
        case LL_IOC_FLUSHCTX:
                return ll_flush_ctx(inode);
-#ifdef CONFIG_FS_POSIX_ACL
-       case LL_IOC_RMTACL: {
-               if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
-                       struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
-                       rc = rct_add(&sbi->ll_rct, current_pid(), arg);
-                       if (!rc)
-                               fd->fd_flags |= LL_FILE_RMTACL;
-                       return rc;
-               } else {
-                       return 0;
-               }
-       }
-#endif
        case LL_IOC_GETOBDCOUNT: {
                int count, vallen;
                struct obd_export *exp;
index f47f2acaf90c33e5b1c960452d031f97b7c82ecb..57281b9e31ffc1d71691bab4ba08a8037282ceb3 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -348,18 +344,6 @@ int ll_file_release(struct inode *inode, struct file *file)
        CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
               PFID(ll_inode2fid(inode)), inode);
 
-#ifdef CONFIG_FS_POSIX_ACL
-       if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
-               struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
-               if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
-                       fd->fd_flags &= ~LL_FILE_RMTACL;
-                       rct_del(&sbi->ll_rct, current_pid());
-                       et_search_free(&sbi->ll_et, current_pid());
-               }
-       }
-#endif
-
        if (!is_root_inode(inode))
                ll_stats_ops_tally(sbi, LPROC_LL_RELEASE, 1);
        fd = LUSTRE_FPRIVATE(file);
@@ -415,7 +399,19 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm,
         * parameters. No need for the open lock
         */
        if (!lmm && lmmsize == 0) {
-               itp->it_flags |= MDS_OPEN_LOCK;
+               struct ll_dentry_data *ldd = ll_d2d(dentry);
+               /*
+                * If we came via ll_iget_for_nfs, then we need to request
+                * struct ll_dentry_data *ldd = ll_d2d(file->f_dentry);
+                *
+                * NB: when ldd is NULL, it must have come via normal
+                * lookup path only, since ll_iget_for_nfs always calls
+                * ll_d_init().
+                */
+               if (ldd && ldd->lld_nfs_dentry) {
+                       ldd->lld_nfs_dentry = 0;
+                       itp->it_flags |= MDS_OPEN_LOCK;
+               }
                if (itp->it_flags & FMODE_WRITE)
                        opc = LUSTRE_OPC_CREATE;
        }
@@ -453,7 +449,7 @@ static int ll_intent_file_open(struct dentry *dentry, void *lmm,
        }
 
        rc = ll_prep_inode(&inode, req, NULL, itp);
-       if (!rc && itp->d.lustre.it_lock_mode)
+       if (!rc && itp->it_lock_mode)
                ll_set_lock_data(sbi->ll_md_exp, inode, itp, NULL);
 
 out:
@@ -480,13 +476,12 @@ void ll_ioepoch_open(struct ll_inode_info *lli, __u64 ioepoch)
 static int ll_och_fill(struct obd_export *md_exp, struct lookup_intent *it,
                       struct obd_client_handle *och)
 {
-       struct ptlrpc_request *req = it->d.lustre.it_data;
        struct mdt_body *body;
 
-       body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+       body = req_capsule_server_get(&it->it_request->rq_pill, &RMF_MDT_BODY);
        och->och_fh = body->handle;
        och->och_fid = body->fid1;
-       och->och_lease_handle.cookie = it->d.lustre.it_lock_handle;
+       och->och_lease_handle.cookie = it->it_lock_handle;
        och->och_magic = OBD_CLIENT_HANDLE_MAGIC;
        och->och_flags = it->it_flags;
 
@@ -504,7 +499,6 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
        LASSERT(fd);
 
        if (och) {
-               struct ptlrpc_request *req = it->d.lustre.it_data;
                struct mdt_body *body;
                int rc;
 
@@ -512,13 +506,19 @@ static int ll_local_open(struct file *file, struct lookup_intent *it,
                if (rc != 0)
                        return rc;
 
-               body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
+               body = req_capsule_server_get(&it->it_request->rq_pill,
+                                             &RMF_MDT_BODY);
                ll_ioepoch_open(lli, body->ioepoch);
        }
 
        LUSTRE_FPRIVATE(file) = fd;
        ll_readahead_init(inode, &fd->fd_ras);
        fd->fd_omode = it->it_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC);
+
+       /* ll_cl_context initialize */
+       rwlock_init(&fd->fd_lock);
+       INIT_LIST_HEAD(&fd->fd_lccs);
+
        return 0;
 }
 
@@ -574,7 +574,7 @@ int ll_file_open(struct inode *inode, struct file *file)
                return 0;
        }
 
-       if (!it || !it->d.lustre.it_disposition) {
+       if (!it || !it->it_disposition) {
                /* Convert f_flags into access mode. We cannot use file->f_mode,
                 * because everything but O_ACCMODE mask was stripped from
                 * there
@@ -644,7 +644,7 @@ restart:
                }
        } else {
                LASSERT(*och_usecount == 0);
-               if (!it->d.lustre.it_disposition) {
+               if (!it->it_disposition) {
                        /* We cannot just request lock handle now, new ELC code
                         * means that one of other OPEN locks for this file
                         * could be cancelled, and since blocking ast handler
@@ -681,7 +681,7 @@ restart:
 
                LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
                         "inode %p: disposition %x, status %d\n", inode,
-                        it_disposition(it, ~0), it->d.lustre.it_status);
+                        it_disposition(it, ~0), it->it_status);
 
                rc = ll_local_open(file, it, fd, *och_p);
                if (rc)
@@ -724,7 +724,7 @@ out_openerr:
        }
 
        if (it && it_disposition(it, DISP_ENQ_OPEN_REF)) {
-               ptlrpc_req_finished(it->d.lustre.it_data);
+               ptlrpc_req_finished(it->it_request);
                it_clear_disposition(it, DISP_ENQ_OPEN_REF);
        }
 
@@ -865,12 +865,12 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
 
        /* already get lease, handle lease lock */
        ll_set_lock_data(sbi->ll_md_exp, inode, &it, NULL);
-       if (it.d.lustre.it_lock_mode == 0 ||
-           it.d.lustre.it_lock_bits != MDS_INODELOCK_OPEN) {
+       if (it.it_lock_mode == 0 ||
+           it.it_lock_bits != MDS_INODELOCK_OPEN) {
                /* open lock must return for lease */
                CERROR(DFID "lease granted but no open lock, %d/%llu.\n",
-                      PFID(ll_inode2fid(inode)), it.d.lustre.it_lock_mode,
-                      it.d.lustre.it_lock_bits);
+                      PFID(ll_inode2fid(inode)), it.it_lock_mode,
+                      it.it_lock_bits);
                rc = -EPROTO;
                goto out_close;
        }
@@ -880,10 +880,10 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
 
 out_close:
        /* Cancel open lock */
-       if (it.d.lustre.it_lock_mode != 0) {
+       if (it.it_lock_mode != 0) {
                ldlm_lock_decref_and_cancel(&och->och_lease_handle,
-                                           it.d.lustre.it_lock_mode);
-               it.d.lustre.it_lock_mode = 0;
+                                           it.it_lock_mode);
+               it.it_lock_mode = 0;
                och->och_lease_handle.cookie = 0ULL;
        }
        rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
@@ -1178,7 +1178,9 @@ restart:
                        CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
                        LBUG();
                }
+               ll_cl_add(file, env, io);
                result = cl_io_loop(env, io);
+               ll_cl_remove(file, env);
                if (args->via_io_subtype == IO_NORMAL)
                        up_read(&lli->lli_trunc_sem);
                if (write_mutex_locked)
@@ -1397,7 +1399,7 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
        rc = ll_intent_file_open(dentry, lum, lum_size, &oit);
        if (rc)
                goto out_unlock;
-       rc = oit.d.lustre.it_status;
+       rc = oit.it_status;
        if (rc < 0)
                goto out_req_free;
 
@@ -1410,7 +1412,7 @@ out_unlock:
 out:
        return rc;
 out_req_free:
-       ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
+       ptlrpc_req_finished((struct ptlrpc_request *)oit.it_request);
        goto out;
 }
 
@@ -1698,7 +1700,7 @@ int ll_release_openhandle(struct inode *inode, struct lookup_intent *it)
 out:
        /* this one is in place of ll_file_open */
        if (it_disposition(it, DISP_ENQ_OPEN_REF)) {
-               ptlrpc_req_finished(it->d.lustre.it_data);
+               ptlrpc_req_finished(it->it_request);
                it_clear_disposition(it, DISP_ENQ_OPEN_REF);
        }
        return rc;
@@ -2972,8 +2974,11 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
                 * here to preserve get_cwd functionality on 2.6.
                 * Bug 10503
                 */
-               if (!d_inode(dentry)->i_nlink)
+               if (!d_inode(dentry)->i_nlink) {
+                       spin_lock(&inode->i_lock);
                        d_lustre_invalidate(dentry, 0);
+                       spin_unlock(&inode->i_lock);
+               }
 
                ll_lookup_finish_locks(&oit, inode);
        } else if (!ll_have_md_lock(d_inode(dentry), &ibits, LCK_MINMODE)) {
@@ -3124,6 +3129,9 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type)
        spin_lock(&lli->lli_lock);
        /* VFS' acl_permission_check->check_acl will release the refcount */
        acl = posix_acl_dup(lli->lli_posix_acl);
+#ifdef CONFIG_FS_POSIX_ACL
+       forget_cached_acl(inode, type);
+#endif
        spin_unlock(&lli->lli_lock);
 
        return acl;
@@ -3150,9 +3158,6 @@ int ll_inode_permission(struct inode *inode, int mask)
        CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
               PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
 
-       if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
-               return lustre_check_remote_perm(inode, mask);
-
        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
        rc = generic_permission(inode, mask);
 
@@ -3601,13 +3606,13 @@ again:
 
        rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
                        NULL, 0, NULL, 0);
-       ptlrpc_req_finished(it.d.lustre.it_data);
-       it.d.lustre.it_data = NULL;
+       ptlrpc_req_finished(it.it_request);
+       it.it_request = NULL;
 
        ll_finish_md_op_data(op_data);
 
-       mode = it.d.lustre.it_lock_mode;
-       it.d.lustre.it_lock_mode = 0;
+       mode = it.it_lock_mode;
+       it.it_lock_mode = 0;
        ll_intent_drop_lock(&it);
 
        if (rc == 0) {
index d8ea75424e2fdb3bf8e1a8c9b9369fbd5c65a99e..92004a05f9ee3157cb4e796c83059fca238ef9bb 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 6c00715b438fa580363f9b2ff3008929221165f9..396e4e4f0715b61f142fa34743176e48631f6595 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 12f3e71f48c2820f2bd15a63ba44247f549b77a2..f6be105eeef77b6ea3e83f665e050f790cbe4146 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -100,7 +96,8 @@ int cl_ocd_update(struct obd_device *host,
        __u64 flags;
        int   result;
 
-       if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
+       if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME) &&
+           watched->obd_set_up && !watched->obd_stopping) {
                cli = &watched->u.cli;
                lco = owner;
                flags = cli->cl_import->imp_connect_data.ocd_connect_flags;
@@ -115,9 +112,10 @@ int cl_ocd_update(struct obd_device *host,
                mutex_unlock(&lco->lco_lock);
                result = 0;
        } else {
-               CERROR("unexpected notification from %s %s!\n",
+               CERROR("unexpected notification from %s %s (setup:%d,stopping:%d)!\n",
                       watched->obd_type->typ_name,
-                      watched->obd_name);
+                      watched->obd_name, watched->obd_set_up,
+                      watched->obd_stopping);
                result = -EINVAL;
        }
        return result;
index 2df551d3ae6c59163a8e51e78a2d92db46c00137..2326b40a0870def846d45231e6bc0f3b3eb92c2a 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 3f2f30b6542c727cc17e56b534f3f12317ddb2e2..4d6d589a1677892ece653ab14aeeee920c8fec13 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -68,6 +64,7 @@ struct ll_dentry_data {
        struct lookup_intent            *lld_it;
        unsigned int                    lld_sa_generation;
        unsigned int                    lld_invalid:1;
+       unsigned int                    lld_nfs_dentry:1;
        struct rcu_head                 lld_rcu_head;
 };
 
@@ -76,9 +73,6 @@ struct ll_dentry_data {
 #define LLI_INODE_MAGIC                 0x111d0de5
 #define LLI_INODE_DEAD           0xdeadd00d
 
-/* remote client permission cache */
-#define REMOTE_PERM_HASHSIZE 16
-
 struct ll_getname_data {
        struct dir_context ctx;
        char        *lgd_name;      /* points to a buffer with NAME_MAX+1 size */
@@ -86,19 +80,6 @@ struct ll_getname_data {
        int           lgd_found;     /* inode matched? */
 };
 
-/* llite setxid/access permission for user on remote client */
-struct ll_remote_perm {
-       struct hlist_node       lrp_list;
-       uid_t              lrp_uid;
-       gid_t              lrp_gid;
-       uid_t              lrp_fsuid;
-       gid_t              lrp_fsgid;
-       int                lrp_access_perm; /* MAY_READ/WRITE/EXEC, this
-                                            * is access permission with
-                                            * lrp_fsuid/lrp_fsgid.
-                                            */
-};
-
 struct ll_grouplock {
        struct lu_env   *lg_env;
        struct cl_io    *lg_io;
@@ -133,9 +114,6 @@ struct ll_inode_info {
        spinlock_t                      lli_lock;
        struct posix_acl                *lli_posix_acl;
 
-       struct hlist_head               *lli_remote_perms;
-       struct mutex                            lli_rmtperm_mutex;
-
        /* identifying fields for both metadata and data stacks. */
        struct lu_fid              lli_fid;
        /* Parent fid for accessing default stripe data on parent directory
@@ -145,8 +123,6 @@ struct ll_inode_info {
 
        struct list_head              lli_close_list;
 
-       unsigned long                 lli_rmtperm_time;
-
        /* handle is to be sent to MDS later on done_writing and setattr.
         * Open handle data are needed for the recovery to reconstruct
         * the inode state on the MDS. XXX: recovery is not ready yet.
@@ -411,7 +387,7 @@ enum stats_track_type {
 #define LL_SBI_FLOCK        0x04
 #define LL_SBI_USER_XATTR      0x08 /* support user xattr */
 #define LL_SBI_ACL            0x10 /* support ACL */
-#define LL_SBI_RMT_CLIENT      0x40 /* remote client */
+/* LL_SBI_RMT_CLIENT            0x40    remote client */
 #define LL_SBI_MDS_CAPA                 0x80 /* support mds capa, obsolete */
 #define LL_SBI_OSS_CAPA                0x100 /* support oss capa, obsolete */
 #define LL_SBI_LOCALFLOCK       0x200 /* Local flocks support by kernel */
@@ -433,7 +409,7 @@ enum stats_track_type {
        "xattr",        \
        "acl",          \
        "???",          \
-       "rmt_client",   \
+       "???",          \
        "mds_capa",     \
        "oss_capa",     \
        "flock",        \
@@ -449,26 +425,6 @@ enum stats_track_type {
        "xattr",        \
 }
 
-#define RCE_HASHES      32
-
-struct rmtacl_ctl_entry {
-       struct list_head       rce_list;
-       pid_t       rce_key; /* hash key */
-       int           rce_ops; /* acl operation type */
-};
-
-struct rmtacl_ctl_table {
-       spinlock_t      rct_lock;
-       struct list_head        rct_entries[RCE_HASHES];
-};
-
-#define EE_HASHES       32
-
-struct eacl_table {
-       spinlock_t      et_lock;
-       struct list_head        et_entries[EE_HASHES];
-};
-
 struct ll_sb_info {
        /* this protects pglist and ra_info.  It isn't safe to
         * grab from interrupt contexts
@@ -497,7 +453,7 @@ struct ll_sb_info {
         * any page which is sent to a server as part of a bulk request,
         * but is uncommitted to stable storage.
         */
-       struct cl_client_cache    ll_cache;
+       struct cl_client_cache    *ll_cache;
 
        struct lprocfs_stats     *ll_ra_stats;
 
@@ -533,8 +489,6 @@ struct ll_sb_info {
        dev_t                     ll_sdev_orig; /* save s_dev before assign for
                                                 * clustered nfs
                                                 */
-       struct rmtacl_ctl_table   ll_rct;
-       struct eacl_table        ll_et;
        __kernel_fsid_t           ll_fsid;
        struct kobject           ll_kobj; /* sysfs object */
        struct super_block      *ll_sb; /* struct super_block (for sysfs code)*/
@@ -640,6 +594,8 @@ struct ll_file_data {
         * false: unknown failure, should report.
         */
        bool fd_write_failed;
+       rwlock_t fd_lock; /* protect lcc list */
+       struct list_head fd_lccs; /* list of ll_cl_context */
 };
 
 struct lov_stripe_md;
@@ -715,8 +671,9 @@ void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
 int ll_readahead(const struct lu_env *env, struct cl_io *io,
                 struct cl_page_list *queue, struct ll_readahead_state *ras,
                 bool hit);
-struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage);
-void ll_cl_fini(struct ll_cl_context *lcc);
+struct ll_cl_context *ll_cl_find(struct file *file);
+void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io);
+void ll_cl_remove(struct file *file, const struct lu_env *env);
 
 extern const struct address_space_operations ll_aops;
 
@@ -858,11 +815,11 @@ struct vvp_io_args {
 };
 
 struct ll_cl_context {
+       struct list_head         lcc_list;
        void       *lcc_cookie;
+       const struct lu_env     *lcc_env;
        struct cl_io   *lcc_io;
        struct cl_page *lcc_page;
-       struct lu_env  *lcc_env;
-       int          lcc_refcheck;
 };
 
 struct ll_thread_info {
@@ -983,14 +940,6 @@ ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
 ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
 int ll_removexattr(struct dentry *dentry, const char *name);
 
-/* llite/remote_perm.c */
-extern struct kmem_cache *ll_remote_perm_cachep;
-extern struct kmem_cache *ll_rmtperm_hash_cachep;
-
-void free_rmtperm_hash(struct hlist_head *hash);
-int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
-int lustre_check_remote_perm(struct inode *inode, int mask);
-
 /**
  * Common IO arguments for various VFS I/O interfaces.
  */
@@ -1004,40 +953,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
 void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
 void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
 
-/* llite/llite_rmtacl.c */
-#ifdef CONFIG_FS_POSIX_ACL
-struct eacl_entry {
-       struct list_head            ee_list;
-       pid_t            ee_key; /* hash key */
-       struct lu_fid    ee_fid;
-       int                ee_type; /* ACL type for ACCESS or DEFAULT */
-       ext_acl_xattr_header *ee_acl;
-};
-
-u64 rce_ops2valid(int ops);
-struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key);
-int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops);
-int rct_del(struct rmtacl_ctl_table *rct, pid_t key);
-void rct_init(struct rmtacl_ctl_table *rct);
-void rct_fini(struct rmtacl_ctl_table *rct);
-
-void ee_free(struct eacl_entry *ee);
-int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
-          ext_acl_xattr_header *header);
-struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
-                                struct lu_fid *fid, int type);
-void et_search_free(struct eacl_table *et, pid_t key);
-void et_init(struct eacl_table *et);
-void et_fini(struct eacl_table *et);
-#else
-static inline u64 rce_ops2valid(int ops)
-{
-       return 0;
-}
-#endif
-
 /* statahead.c */
-
 #define LL_SA_RPC_MIN     2
 #define LL_SA_RPC_DEF     32
 #define LL_SA_RPC_MAX     8192
@@ -1281,7 +1197,7 @@ static inline int ll_file_nolock(const struct file *file)
 static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
                                    struct lookup_intent *it, __u64 *bits)
 {
-       if (!it->d.lustre.it_lock_set) {
+       if (!it->it_lock_set) {
                struct lustre_handle handle;
 
                /* If this inode is a remote object, it will get two
@@ -1292,36 +1208,26 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
                 * LOOKUP and PERM locks, so revoking either locks will
                 * case the dcache being cleared
                 */
-               if (it->d.lustre.it_remote_lock_mode) {
-                       handle.cookie = it->d.lustre.it_remote_lock_handle;
+               if (it->it_remote_lock_mode) {
+                       handle.cookie = it->it_remote_lock_handle;
                        CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for remote lock %#llx\n",
                               PFID(ll_inode2fid(inode)), inode,
                               handle.cookie);
                        md_set_lock_data(exp, &handle.cookie, inode, NULL);
                }
 
-               handle.cookie = it->d.lustre.it_lock_handle;
+               handle.cookie = it->it_lock_handle;
 
                CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for lock %#llx\n",
                       PFID(ll_inode2fid(inode)), inode, handle.cookie);
 
                md_set_lock_data(exp, &handle.cookie, inode,
-                                &it->d.lustre.it_lock_bits);
-               it->d.lustre.it_lock_set = 1;
+                                &it->it_lock_bits);
+               it->it_lock_set = 1;
        }
 
        if (bits)
-               *bits = it->d.lustre.it_lock_bits;
-}
-
-static inline void ll_lock_dcache(struct inode *inode)
-{
-       spin_lock(&inode->i_lock);
-}
-
-static inline void ll_unlock_dcache(struct inode *inode)
-{
-       spin_unlock(&inode->i_lock);
+               *bits = it->it_lock_bits;
 }
 
 static inline int d_lustre_invalid(const struct dentry *dentry)
index 96c7e9fc6e5f7bdf4c2e6a7bce7461fa815c3d4a..546063e728db1cd9e3bb20c9d9276d7d399a7d49 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -87,15 +83,11 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
        pages = si.totalram - si.totalhigh;
        lru_page_max = pages / 2;
 
-       /* initialize ll_cache data */
-       atomic_set(&sbi->ll_cache.ccc_users, 0);
-       sbi->ll_cache.ccc_lru_max = lru_page_max;
-       atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
-       spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
-       INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
-
-       atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
-       init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
+       sbi->ll_cache = cl_cache_init(lru_page_max);
+       if (!sbi->ll_cache) {
+               kfree(sbi);
+               return NULL;
+       }
 
        sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
                                           SBI_DEFAULT_READAHEAD_MAX);
@@ -135,6 +127,11 @@ static void ll_free_sbi(struct super_block *sb)
 {
        struct ll_sb_info *sbi = ll_s2sbi(sb);
 
+       if (sbi->ll_cache) {
+               cl_cache_decref(sbi->ll_cache);
+               sbi->ll_cache = NULL;
+       }
+
        kfree(sbi);
 }
 
@@ -175,8 +172,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
                                  OBD_CONNECT_VERSION  | OBD_CONNECT_BRW_SIZE |
                                  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID     |
                                  OBD_CONNECT_AT       | OBD_CONNECT_LOV_V3   |
-                                 OBD_CONNECT_RMT_CLIENT | OBD_CONNECT_VBR    |
-                                 OBD_CONNECT_FULL20   | OBD_CONNECT_64BITHASH|
+                                 OBD_CONNECT_VBR       | OBD_CONNECT_FULL20  |
+                                 OBD_CONNECT_64BITHASH |
                                  OBD_CONNECT_EINPROGRESS |
                                  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
                                  OBD_CONNECT_LAYOUTLOCK |
@@ -217,8 +214,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 
        /* real client */
        data->ocd_connect_flags |= OBD_CONNECT_REAL;
-       if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
-               data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
 
        data->ocd_brw_size = MD_MAX_BRW_SIZE;
 
@@ -311,18 +306,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
                sbi->ll_flags &= ~LL_SBI_ACL;
        }
 
-       if (data->ocd_connect_flags & OBD_CONNECT_RMT_CLIENT) {
-               if (!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
-                       sbi->ll_flags |= LL_SBI_RMT_CLIENT;
-                       LCONSOLE_INFO("client is set as remote by default.\n");
-               }
-       } else {
-               if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
-                       sbi->ll_flags &= ~LL_SBI_RMT_CLIENT;
-                       LCONSOLE_INFO("client claims to be remote, but server rejected, forced to be local.\n");
-               }
-       }
-
        if (data->ocd_connect_flags & OBD_CONNECT_64BITHASH)
                sbi->ll_flags |= LL_SBI_64BIT_HASH;
 
@@ -356,10 +339,9 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
                                  OBD_CONNECT_REQPORTAL | OBD_CONNECT_BRW_SIZE |
                                  OBD_CONNECT_CANCELSET | OBD_CONNECT_FID      |
                                  OBD_CONNECT_SRVLOCK   | OBD_CONNECT_TRUNCLOCK|
-                                 OBD_CONNECT_AT | OBD_CONNECT_RMT_CLIENT |
-                                 OBD_CONNECT_OSS_CAPA | OBD_CONNECT_VBR|
-                                 OBD_CONNECT_FULL20 | OBD_CONNECT_64BITHASH |
-                                 OBD_CONNECT_MAXBYTES |
+                                 OBD_CONNECT_AT        | OBD_CONNECT_OSS_CAPA |
+                                 OBD_CONNECT_VBR       | OBD_CONNECT_FULL20   |
+                                 OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES |
                                  OBD_CONNECT_EINPROGRESS |
                                  OBD_CONNECT_JOBSTATS | OBD_CONNECT_LVB_TYPE |
                                  OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_PINGLESS;
@@ -382,8 +364,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
        }
 
        data->ocd_connect_flags |= OBD_CONNECT_LRU_RESIZE;
-       if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
-               data->ocd_connect_flags |= OBD_CONNECT_RMT_CLIENT_FORCE;
 
        CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d\n",
               data->ocd_connect_flags,
@@ -446,9 +426,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
         * XXX: move this to after cbd setup?
         */
        valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
-       if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
-               valid |= OBD_MD_FLRMTPERM;
-       else if (sbi->ll_flags & LL_SBI_ACL)
+       if (sbi->ll_flags & LL_SBI_ACL)
                valid |= OBD_MD_FLACL;
 
        op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
@@ -504,13 +482,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
                goto out_root;
        }
 
-#ifdef CONFIG_FS_POSIX_ACL
-       if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
-               rct_init(&sbi->ll_rct);
-               et_init(&sbi->ll_et);
-       }
-#endif
-
        checksum = sbi->ll_flags & LL_SBI_CHECKSUM;
        err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
                                 KEY_CHECKSUM, sizeof(checksum), &checksum,
@@ -518,8 +489,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
        cl_sb_init(sb);
 
        err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
-                                KEY_CACHE_SET, sizeof(sbi->ll_cache),
-                                &sbi->ll_cache, NULL);
+                                KEY_CACHE_SET, sizeof(*sbi->ll_cache),
+                                sbi->ll_cache, NULL);
 
        sb->s_root = d_make_root(root);
        if (!sb->s_root) {
@@ -564,8 +535,6 @@ out_lock_cn_cb:
 out_dt:
        obd_disconnect(sbi->ll_dt_exp);
        sbi->ll_dt_exp = NULL;
-       /* Make sure all OScs are gone, since cl_cache is accessing sbi. */
-       obd_zombie_barrier();
 out_md_fid:
        obd_fid_fini(sbi->ll_md_exp->exp_obd);
 out_md:
@@ -608,13 +577,6 @@ static void client_common_put_super(struct super_block *sb)
 {
        struct ll_sb_info *sbi = ll_s2sbi(sb);
 
-#ifdef CONFIG_FS_POSIX_ACL
-       if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
-               et_fini(&sbi->ll_et);
-               rct_fini(&sbi->ll_rct);
-       }
-#endif
-
        ll_close_thread_shutdown(sbi->ll_lcq);
 
        cl_sb_fini(sb);
@@ -622,10 +584,6 @@ static void client_common_put_super(struct super_block *sb)
        obd_fid_fini(sbi->ll_dt_exp->exp_obd);
        obd_disconnect(sbi->ll_dt_exp);
        sbi->ll_dt_exp = NULL;
-       /* wait till all OSCs are gone, since cl_cache is accessing sbi.
-        * see LU-2543.
-        */
-       obd_zombie_barrier();
 
        ldebugfs_unregister_mountpoint(sbi);
 
@@ -704,11 +662,6 @@ static int ll_options(char *options, int *flags)
                        *flags &= ~tmp;
                        goto next;
                }
-               tmp = ll_set_opt("remote_client", s1, LL_SBI_RMT_CLIENT);
-               if (tmp) {
-                       *flags |= tmp;
-                       goto next;
-               }
                tmp = ll_set_opt("user_fid2path", s1, LL_SBI_USER_FID2PATH);
                if (tmp) {
                        *flags |= tmp;
@@ -792,12 +745,9 @@ void ll_lli_init(struct ll_inode_info *lli)
        lli->lli_maxbytes = MAX_LFS_FILESIZE;
        spin_lock_init(&lli->lli_lock);
        lli->lli_posix_acl = NULL;
-       lli->lli_remote_perms = NULL;
-       mutex_init(&lli->lli_rmtperm_mutex);
        /* Do not set lli_fid, it has been initialized already. */
        fid_zero(&lli->lli_pfid);
        INIT_LIST_HEAD(&lli->lli_close_list);
-       lli->lli_rmtperm_time = 0;
        lli->lli_pending_och = NULL;
        lli->lli_mds_read_och = NULL;
        lli->lli_mds_write_och = NULL;
@@ -864,7 +814,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
        try_module_get(THIS_MODULE);
 
        /* client additional sb info */
-       lsi->lsi_llsbi = sbi = ll_init_sbi(sb);
+       sbi = ll_init_sbi(sb);
+       lsi->lsi_llsbi = sbi;
        if (!sbi) {
                module_put(THIS_MODULE);
                kfree(cfg);
@@ -965,12 +916,12 @@ void ll_put_super(struct super_block *sb)
        if (!force) {
                struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
 
-               rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
-                                 !atomic_read(&sbi->ll_cache.ccc_unstable_nr),
+               rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
+                                 !atomic_read(&sbi->ll_cache->ccc_unstable_nr),
                                  &lwi);
        }
 
-       ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
+       ccc_count = atomic_read(&sbi->ll_cache->ccc_unstable_nr);
        if (!force && rc != -EINTR)
                LASSERTF(!ccc_count, "count: %i\n", ccc_count);
 
@@ -1078,17 +1029,9 @@ void ll_clear_inode(struct inode *inode)
 
        ll_xattr_cache_destroy(inode);
 
-       if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
-               LASSERT(!lli->lli_posix_acl);
-               if (lli->lli_remote_perms) {
-                       free_rmtperm_hash(lli->lli_remote_perms);
-                       lli->lli_remote_perms = NULL;
-               }
-       }
 #ifdef CONFIG_FS_POSIX_ACL
-       else if (lli->lli_posix_acl) {
+       if (lli->lli_posix_acl) {
                LASSERT(atomic_read(&lli->lli_posix_acl->a_refcount) == 1);
-               LASSERT(!lli->lli_remote_perms);
                posix_acl_release(lli->lli_posix_acl);
                lli->lli_posix_acl = NULL;
        }
@@ -1540,12 +1483,8 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
                        lli->lli_maxbytes = MAX_LFS_FILESIZE;
        }
 
-       if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
-               if (body->valid & OBD_MD_FLRMTPERM)
-                       ll_update_remote_perm(inode, md->remote_perm);
-       }
 #ifdef CONFIG_FS_POSIX_ACL
-       else if (body->valid & OBD_MD_FLACL) {
+       if (body->valid & OBD_MD_FLACL) {
                spin_lock(&lli->lli_lock);
                if (lli->lli_posix_acl)
                        posix_acl_release(lli->lli_posix_acl);
@@ -1979,7 +1918,13 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
                 * At this point server returns to client's same fid as client
                 * generated for creating. So using ->fid1 is okay here.
                 */
-               LASSERT(fid_is_sane(&md.body->fid1));
+               if (!fid_is_sane(&md.body->fid1)) {
+                       CERROR("%s: Fid is insane " DFID "\n",
+                              ll_get_fsname(sb, NULL, 0),
+                              PFID(&md.body->fid1));
+                       rc = -EINVAL;
+                       goto out;
+               }
 
                *inode = ll_iget(sb, cl_fid_build_ino(&md.body->fid1,
                                             sbi->ll_flags & LL_SBI_32BIT_API),
@@ -2006,11 +1951,11 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
         * 3. proc2: refresh layout and layout lock granted
         * 4. proc1: to apply a stale layout
         */
-       if (it && it->d.lustre.it_lock_mode != 0) {
+       if (it && it->it_lock_mode != 0) {
                struct lustre_handle lockh;
                struct ldlm_lock *lock;
 
-               lockh.cookie = it->d.lustre.it_lock_handle;
+               lockh.cookie = it->it_lock_handle;
                lock = ldlm_handle2lock(&lockh);
                LASSERT(lock);
                if (ldlm_has_layout(lock)) {
index 88ef1cac9e0f1d3cec3feea38a0a83b53c69f549..66ee5db5fce847f1bf0509df4ff9a3adaca886df 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -200,18 +196,11 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
 
        set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
 
-       /* we grab lli_trunc_sem to exclude truncate case.
-        * Otherwise, we could add dirty pages into osc cache
-        * while truncate is on-going.
-        */
        inode = vvp_object_inode(io->ci_obj);
        lli = ll_i2info(inode);
-       down_read(&lli->lli_trunc_sem);
 
        result = cl_io_loop(env, io);
 
-       up_read(&lli->lli_trunc_sem);
-
        cfs_restore_sigs(set);
 
        if (result == 0) {
@@ -315,8 +304,13 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
                vio->u.fault.ft_flags = 0;
                vio->u.fault.ft_flags_valid = false;
 
+               /* May call ll_readpage() */
+               ll_cl_add(vma->vm_file, env, io);
+
                result = cl_io_loop(env, io);
 
+               ll_cl_remove(vma->vm_file, env);
+
                /* ft_flags are only valid if we reached
                 * the call to filemap_fault
                 */
index c1eef6198b25f169fcba7247ba358014f267230e..65972c892731498a76330beeac6c700da7026e39 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -172,6 +168,24 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
 
        /* N.B. d_obtain_alias() drops inode ref on error */
        result = d_obtain_alias(inode);
+       if (!IS_ERR(result)) {
+               int rc;
+
+               rc = ll_d_init(result);
+               if (rc < 0) {
+                       dput(result);
+                       result = ERR_PTR(rc);
+               } else {
+                       struct ll_dentry_data *ldd = ll_d2d(result);
+
+                       /*
+                        * Need to signal to the ll_intent_file_open that
+                        * we came from NFS and so opencache needs to be
+                        * enabled for this one
+                        */
+                       ldd->lld_nfs_dentry = 1;
+               }
+       }
 
        return result;
 }
diff --git a/drivers/staging/lustre/lustre/llite/llite_rmtacl.c b/drivers/staging/lustre/lustre/llite/llite_rmtacl.c
deleted file mode 100644 (file)
index 8509b07..0000000
+++ /dev/null
@@ -1,299 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/llite_rmtacl.c
- *
- * Lustre Remote User Access Control List.
- *
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#ifdef CONFIG_FS_POSIX_ACL
-
-#include "../include/lustre_lite.h"
-#include "../include/lustre_eacl.h"
-#include "llite_internal.h"
-
-static inline __u32 rce_hashfunc(uid_t id)
-{
-       return id & (RCE_HASHES - 1);
-}
-
-static inline __u32 ee_hashfunc(uid_t id)
-{
-       return id & (EE_HASHES - 1);
-}
-
-u64 rce_ops2valid(int ops)
-{
-       switch (ops) {
-       case RMT_LSETFACL:
-               return OBD_MD_FLRMTLSETFACL;
-       case RMT_LGETFACL:
-               return OBD_MD_FLRMTLGETFACL;
-       case RMT_RSETFACL:
-               return OBD_MD_FLRMTRSETFACL;
-       case RMT_RGETFACL:
-               return OBD_MD_FLRMTRGETFACL;
-       default:
-               return 0;
-       }
-}
-
-static struct rmtacl_ctl_entry *rce_alloc(pid_t key, int ops)
-{
-       struct rmtacl_ctl_entry *rce;
-
-       rce = kzalloc(sizeof(*rce), GFP_NOFS);
-       if (!rce)
-               return NULL;
-
-       INIT_LIST_HEAD(&rce->rce_list);
-       rce->rce_key = key;
-       rce->rce_ops = ops;
-
-       return rce;
-}
-
-static void rce_free(struct rmtacl_ctl_entry *rce)
-{
-       if (!list_empty(&rce->rce_list))
-               list_del(&rce->rce_list);
-
-       kfree(rce);
-}
-
-static struct rmtacl_ctl_entry *__rct_search(struct rmtacl_ctl_table *rct,
-                                            pid_t key)
-{
-       struct rmtacl_ctl_entry *rce;
-       struct list_head *head = &rct->rct_entries[rce_hashfunc(key)];
-
-       list_for_each_entry(rce, head, rce_list)
-               if (rce->rce_key == key)
-                       return rce;
-
-       return NULL;
-}
-
-struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key)
-{
-       struct rmtacl_ctl_entry *rce;
-
-       spin_lock(&rct->rct_lock);
-       rce = __rct_search(rct, key);
-       spin_unlock(&rct->rct_lock);
-       return rce;
-}
-
-int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops)
-{
-       struct rmtacl_ctl_entry *rce, *e;
-
-       rce = rce_alloc(key, ops);
-       if (!rce)
-               return -ENOMEM;
-
-       spin_lock(&rct->rct_lock);
-       e = __rct_search(rct, key);
-       if (unlikely(e)) {
-               CWARN("Unexpected stale rmtacl_entry found: [key: %d] [ops: %d]\n",
-                     (int)key, ops);
-               rce_free(e);
-       }
-       list_add_tail(&rce->rce_list, &rct->rct_entries[rce_hashfunc(key)]);
-       spin_unlock(&rct->rct_lock);
-
-       return 0;
-}
-
-int rct_del(struct rmtacl_ctl_table *rct, pid_t key)
-{
-       struct rmtacl_ctl_entry *rce;
-
-       spin_lock(&rct->rct_lock);
-       rce = __rct_search(rct, key);
-       if (rce)
-               rce_free(rce);
-       spin_unlock(&rct->rct_lock);
-
-       return rce ? 0 : -ENOENT;
-}
-
-void rct_init(struct rmtacl_ctl_table *rct)
-{
-       int i;
-
-       spin_lock_init(&rct->rct_lock);
-       for (i = 0; i < RCE_HASHES; i++)
-               INIT_LIST_HEAD(&rct->rct_entries[i]);
-}
-
-void rct_fini(struct rmtacl_ctl_table *rct)
-{
-       struct rmtacl_ctl_entry *rce;
-       int i;
-
-       spin_lock(&rct->rct_lock);
-       for (i = 0; i < RCE_HASHES; i++)
-               while (!list_empty(&rct->rct_entries[i])) {
-                       rce = list_entry(rct->rct_entries[i].next,
-                                        struct rmtacl_ctl_entry, rce_list);
-                       rce_free(rce);
-               }
-       spin_unlock(&rct->rct_lock);
-}
-
-static struct eacl_entry *ee_alloc(pid_t key, struct lu_fid *fid, int type,
-                                  ext_acl_xattr_header *header)
-{
-       struct eacl_entry *ee;
-
-       ee = kzalloc(sizeof(*ee), GFP_NOFS);
-       if (!ee)
-               return NULL;
-
-       INIT_LIST_HEAD(&ee->ee_list);
-       ee->ee_key = key;
-       ee->ee_fid = *fid;
-       ee->ee_type = type;
-       ee->ee_acl = header;
-
-       return ee;
-}
-
-void ee_free(struct eacl_entry *ee)
-{
-       if (!list_empty(&ee->ee_list))
-               list_del(&ee->ee_list);
-
-       if (ee->ee_acl)
-               lustre_ext_acl_xattr_free(ee->ee_acl);
-
-       kfree(ee);
-}
-
-static struct eacl_entry *__et_search_del(struct eacl_table *et, pid_t key,
-                                         struct lu_fid *fid, int type)
-{
-       struct eacl_entry *ee;
-       struct list_head *head = &et->et_entries[ee_hashfunc(key)];
-
-       LASSERT(fid);
-       list_for_each_entry(ee, head, ee_list)
-               if (ee->ee_key == key) {
-                       if (lu_fid_eq(&ee->ee_fid, fid) &&
-                           ee->ee_type == type) {
-                               list_del_init(&ee->ee_list);
-                               return ee;
-                       }
-               }
-
-       return NULL;
-}
-
-struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
-                                struct lu_fid *fid, int type)
-{
-       struct eacl_entry *ee;
-
-       spin_lock(&et->et_lock);
-       ee = __et_search_del(et, key, fid, type);
-       spin_unlock(&et->et_lock);
-       return ee;
-}
-
-void et_search_free(struct eacl_table *et, pid_t key)
-{
-       struct eacl_entry *ee, *next;
-       struct list_head *head = &et->et_entries[ee_hashfunc(key)];
-
-       spin_lock(&et->et_lock);
-       list_for_each_entry_safe(ee, next, head, ee_list)
-               if (ee->ee_key == key)
-                       ee_free(ee);
-
-       spin_unlock(&et->et_lock);
-}
-
-int ee_add(struct eacl_table *et, pid_t key, struct lu_fid *fid, int type,
-          ext_acl_xattr_header *header)
-{
-       struct eacl_entry *ee, *e;
-
-       ee = ee_alloc(key, fid, type, header);
-       if (!ee)
-               return -ENOMEM;
-
-       spin_lock(&et->et_lock);
-       e = __et_search_del(et, key, fid, type);
-       if (unlikely(e)) {
-               CWARN("Unexpected stale eacl_entry found: [key: %d] [fid: " DFID "] [type: %d]\n",
-                     (int)key, PFID(fid), type);
-               ee_free(e);
-       }
-       list_add_tail(&ee->ee_list, &et->et_entries[ee_hashfunc(key)]);
-       spin_unlock(&et->et_lock);
-
-       return 0;
-}
-
-void et_init(struct eacl_table *et)
-{
-       int i;
-
-       spin_lock_init(&et->et_lock);
-       for (i = 0; i < EE_HASHES; i++)
-               INIT_LIST_HEAD(&et->et_entries[i]);
-}
-
-void et_fini(struct eacl_table *et)
-{
-       struct eacl_entry *ee;
-       int i;
-
-       spin_lock(&et->et_lock);
-       for (i = 0; i < EE_HASHES; i++)
-               while (!list_empty(&et->et_entries[i])) {
-                       ee = list_entry(et->et_entries[i].next,
-                                       struct eacl_entry, ee_list);
-                       ee_free(ee);
-               }
-       spin_unlock(&et->et_lock);
-}
-
-#endif
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
deleted file mode 100644 (file)
index 813a9a3..0000000
+++ /dev/null
@@ -1,883 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-/*
- *  linux/drivers/block/loop.c
- *
- *  Written by Theodore Ts'o, 3/29/93
- *
- * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
- * permitted under the GNU General Public License.
- *
- * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
- * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
- *
- * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
- *
- * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
- *
- * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
- *
- * Loadable modules and other fixes by AK, 1998
- *
- * Maximum number of loop devices now dynamic via max_loop module parameter.
- * Russell Kroll <rkroll@exploits.org> 19990701
- *
- * Maximum number of loop devices when compiled-in now selectable by passing
- * max_loop=<1-255> to the kernel on boot.
- * Erik I. Bols?, <eriki@himolde.no>, Oct 31, 1999
- *
- * Completely rewrite request handling to be make_request_fn style and
- * non blocking, pushing work to a helper thread. Lots of fixes from
- * Al Viro too.
- * Jens Axboe <axboe@suse.de>, Nov 2000
- *
- * Support up to 256 loop devices
- * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
- *
- * Support for falling back on the write file operation when the address space
- * operations prepare_write and/or commit_write are not available on the
- * backing filesystem.
- * Anton Altaparmakov, 16 Feb 2005
- *
- * Still To Fix:
- * - Advisory locking is ignored here.
- * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
- *
- */
-
-#include <linux/module.h>
-
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/stat.h>
-#include <linux/errno.h>
-#include <linux/major.h>
-#include <linux/wait.h>
-#include <linux/blkdev.h>
-#include <linux/blkpg.h>
-#include <linux/init.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/writeback.h>
-#include <linux/buffer_head.h>         /* for invalidate_bdev() */
-#include <linux/completion.h>
-#include <linux/highmem.h>
-#include <linux/gfp.h>
-#include <linux/pagevec.h>
-#include <linux/uaccess.h>
-
-#include "../include/lustre_lib.h"
-#include "../include/lustre_lite.h"
-#include "llite_internal.h"
-
-#define LLOOP_MAX_SEGMENTS     LNET_MAX_IOV
-
-/* Possible states of device */
-enum {
-       LLOOP_UNBOUND,
-       LLOOP_BOUND,
-       LLOOP_RUNDOWN,
-};
-
-struct lloop_device {
-       int               lo_number;
-       int               lo_refcnt;
-       loff_t         lo_offset;
-       loff_t         lo_sizelimit;
-       int               lo_flags;
-       struct file      *lo_backing_file;
-       struct block_device *lo_device;
-       unsigned             lo_blocksize;
-
-       gfp_t             old_gfp_mask;
-
-       spinlock_t              lo_lock;
-       struct bio              *lo_bio;
-       struct bio              *lo_biotail;
-       int                     lo_state;
-       struct semaphore        lo_sem;
-       struct mutex            lo_ctl_mutex;
-       atomic_t         lo_pending;
-       wait_queue_head_t         lo_bh_wait;
-
-       struct request_queue *lo_queue;
-
-       const struct lu_env *lo_env;
-       struct cl_io     lo_io;
-       struct ll_dio_pages  lo_pvec;
-
-       /* data to handle bio for lustre. */
-       struct lo_request_data {
-               struct page *lrd_pages[LLOOP_MAX_SEGMENTS];
-               loff_t       lrd_offsets[LLOOP_MAX_SEGMENTS];
-       } lo_requests[1];
-};
-
-/*
- * Loop flags
- */
-enum {
-       LO_FLAGS_READ_ONLY       = 1,
-};
-
-static int lloop_major;
-#define MAX_LOOP_DEFAULT  16
-static int max_loop = MAX_LOOP_DEFAULT;
-static struct lloop_device *loop_dev;
-static struct gendisk **disks;
-static struct mutex lloop_mutex;
-static void *ll_iocontrol_magic;
-
-static loff_t get_loop_size(struct lloop_device *lo, struct file *file)
-{
-       loff_t size, offset, loopsize;
-
-       /* Compute loopsize in bytes */
-       size = i_size_read(file->f_mapping->host);
-       offset = lo->lo_offset;
-       loopsize = size - offset;
-       if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
-               loopsize = lo->lo_sizelimit;
-
-       /*
-        * Unfortunately, if we want to do I/O on the device,
-        * the number of 512-byte sectors has to fit into a sector_t.
-        */
-       return loopsize >> 9;
-}
-
-static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
-{
-       const struct lu_env  *env   = lo->lo_env;
-       struct cl_io     *io    = &lo->lo_io;
-       struct inode     *inode = file_inode(lo->lo_backing_file);
-       struct cl_object     *obj = ll_i2info(inode)->lli_clob;
-       pgoff_t        offset;
-       int                ret;
-       int                rw;
-       u32                page_count = 0;
-       struct bio_vec       bvec;
-       struct bvec_iter   iter;
-       struct bio         *bio;
-       ssize_t        bytes;
-
-       struct ll_dio_pages  *pvec = &lo->lo_pvec;
-       struct page      **pages = pvec->ldp_pages;
-       loff_t         *offsets = pvec->ldp_offsets;
-
-       truncate_inode_pages(inode->i_mapping, 0);
-
-       /* initialize the IO */
-       memset(io, 0, sizeof(*io));
-       io->ci_obj = obj;
-       ret = cl_io_init(env, io, CIT_MISC, obj);
-       if (ret)
-               return io->ci_result;
-       io->ci_lockreq = CILR_NEVER;
-
-       rw = head->bi_rw;
-       for (bio = head; bio ; bio = bio->bi_next) {
-               LASSERT(rw == bio->bi_rw);
-
-               offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
-               bio_for_each_segment(bvec, bio, iter) {
-                       BUG_ON(bvec.bv_offset != 0);
-                       BUG_ON(bvec.bv_len != PAGE_SIZE);
-
-                       pages[page_count] = bvec.bv_page;
-                       offsets[page_count] = offset;
-                       page_count++;
-                       offset += bvec.bv_len;
-               }
-               LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
-       }
-
-       ll_stats_ops_tally(ll_i2sbi(inode),
-                       (rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
-                       page_count);
-
-       pvec->ldp_size = page_count << PAGE_SHIFT;
-       pvec->ldp_nr = page_count;
-
-       /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
-        * write those pages into OST. Even worse case is that more pages
-        * would be asked to write out to swap space, and then finally get here
-        * again.
-        * Unfortunately this is NOT easy to fix.
-        * Thoughts on solution:
-        * 0. Define a reserved pool for cl_pages, which could be a list of
-        *    pre-allocated cl_pages;
-        * 1. Define a new operation in cl_object_operations{}, says clo_depth,
-        *    which measures how many layers for this lustre object. Generally
-        *    speaking, the depth would be 2, one for llite, and one for lovsub.
-        *    However, for SNS, there will be more since we need additional page
-        *    to store parity;
-        * 2. Reserve the # of (page_count * depth) cl_pages from the reserved
-        *    pool. Afterwards, the clio would allocate the pages from reserved
-        *    pool, this guarantees we needn't allocate the cl_pages from
-        *    generic cl_page slab cache.
-        *    Of course, if there is NOT enough pages in the pool, we might
-        *    be asked to write less pages once, this purely depends on
-        *    implementation. Anyway, we should be careful to avoid deadlocking.
-        */
-       inode_lock(inode);
-       bytes = ll_direct_rw_pages(env, io, rw, inode, pvec);
-       inode_unlock(inode);
-       cl_io_fini(env, io);
-       return (bytes == pvec->ldp_size) ? 0 : (int)bytes;
-}
-
-/*
- * Add bio to back of pending list
- */
-static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&lo->lo_lock, flags);
-       if (lo->lo_biotail) {
-               lo->lo_biotail->bi_next = bio;
-               lo->lo_biotail = bio;
-       } else {
-               lo->lo_bio = lo->lo_biotail = bio;
-       }
-       spin_unlock_irqrestore(&lo->lo_lock, flags);
-
-       atomic_inc(&lo->lo_pending);
-       if (waitqueue_active(&lo->lo_bh_wait))
-               wake_up(&lo->lo_bh_wait);
-}
-
-/*
- * Grab first pending buffer
- */
-static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
-{
-       struct bio *first;
-       struct bio **bio;
-       unsigned int count = 0;
-       unsigned int page_count = 0;
-       int rw;
-
-       spin_lock_irq(&lo->lo_lock);
-       first = lo->lo_bio;
-       if (unlikely(!first)) {
-               spin_unlock_irq(&lo->lo_lock);
-               return 0;
-       }
-
-       /* TODO: need to split the bio, too bad. */
-       LASSERT(first->bi_vcnt <= LLOOP_MAX_SEGMENTS);
-
-       rw = first->bi_rw;
-       bio = &lo->lo_bio;
-       while (*bio && (*bio)->bi_rw == rw) {
-               CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u\n",
-                      (unsigned long long)(*bio)->bi_iter.bi_sector,
-                      (*bio)->bi_iter.bi_size,
-                      page_count, (*bio)->bi_vcnt);
-               if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
-                       break;
-
-               page_count += (*bio)->bi_vcnt;
-               count++;
-               bio = &(*bio)->bi_next;
-       }
-       if (*bio) {
-               /* Some of bios can't be mergeable. */
-               lo->lo_bio = *bio;
-               *bio = NULL;
-       } else {
-               /* Hit the end of queue */
-               lo->lo_biotail = NULL;
-               lo->lo_bio = NULL;
-       }
-       *req = first;
-       spin_unlock_irq(&lo->lo_lock);
-       return count;
-}
-
-static blk_qc_t loop_make_request(struct request_queue *q, struct bio *old_bio)
-{
-       struct lloop_device *lo = q->queuedata;
-       int rw = bio_rw(old_bio);
-       int inactive;
-
-       blk_queue_split(q, &old_bio, q->bio_split);
-
-       if (!lo)
-               goto err;
-
-       CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
-              (unsigned long long)old_bio->bi_iter.bi_sector,
-              old_bio->bi_iter.bi_size);
-
-       spin_lock_irq(&lo->lo_lock);
-       inactive = lo->lo_state != LLOOP_BOUND;
-       spin_unlock_irq(&lo->lo_lock);
-       if (inactive)
-               goto err;
-
-       if (rw == WRITE) {
-               if (lo->lo_flags & LO_FLAGS_READ_ONLY)
-                       goto err;
-       } else if (rw == READA) {
-               rw = READ;
-       } else if (rw != READ) {
-               CERROR("lloop: unknown command (%x)\n", rw);
-               goto err;
-       }
-       loop_add_bio(lo, old_bio);
-       return BLK_QC_T_NONE;
-err:
-       bio_io_error(old_bio);
-       return BLK_QC_T_NONE;
-}
-
-static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
-{
-       int ret;
-
-       ret = do_bio_lustrebacked(lo, bio);
-       while (bio) {
-               struct bio *tmp = bio->bi_next;
-
-               bio->bi_next = NULL;
-               bio->bi_error = ret;
-               bio_endio(bio);
-               bio = tmp;
-       }
-}
-
-static inline int loop_active(struct lloop_device *lo)
-{
-       return atomic_read(&lo->lo_pending) ||
-               (lo->lo_state == LLOOP_RUNDOWN);
-}
-
-/*
- * worker thread that handles reads/writes to file backed loop devices,
- * to avoid blocking in our make_request_fn.
- */
-static int loop_thread(void *data)
-{
-       struct lloop_device *lo = data;
-       struct bio *bio;
-       unsigned int count;
-       unsigned long times = 0;
-       unsigned long total_count = 0;
-
-       struct lu_env *env;
-       int refcheck;
-       int ret = 0;
-
-       set_user_nice(current, MIN_NICE);
-
-       lo->lo_state = LLOOP_BOUND;
-
-       env = cl_env_get(&refcheck);
-       if (IS_ERR(env)) {
-               ret = PTR_ERR(env);
-               goto out;
-       }
-
-       lo->lo_env = env;
-       memset(&lo->lo_pvec, 0, sizeof(lo->lo_pvec));
-       lo->lo_pvec.ldp_pages   = lo->lo_requests[0].lrd_pages;
-       lo->lo_pvec.ldp_offsets = lo->lo_requests[0].lrd_offsets;
-
-       /*
-        * up sem, we are running
-        */
-       up(&lo->lo_sem);
-
-       for (;;) {
-               wait_event(lo->lo_bh_wait, loop_active(lo));
-               if (!atomic_read(&lo->lo_pending)) {
-                       int exiting = 0;
-
-                       spin_lock_irq(&lo->lo_lock);
-                       exiting = (lo->lo_state == LLOOP_RUNDOWN);
-                       spin_unlock_irq(&lo->lo_lock);
-                       if (exiting)
-                               break;
-               }
-
-               bio = NULL;
-               count = loop_get_bio(lo, &bio);
-               if (!count) {
-                       CWARN("lloop(minor: %d): missing bio\n", lo->lo_number);
-                       continue;
-               }
-
-               total_count += count;
-               if (total_count < count) {     /* overflow */
-                       total_count = count;
-                       times = 1;
-               } else {
-                       times++;
-               }
-               if ((times & 127) == 0) {
-                       CDEBUG(D_INFO, "total: %lu, count: %lu, avg: %lu\n",
-                              total_count, times, total_count / times);
-               }
-
-               LASSERT(bio);
-               LASSERT(count <= atomic_read(&lo->lo_pending));
-               loop_handle_bio(lo, bio);
-               atomic_sub(count, &lo->lo_pending);
-       }
-       cl_env_put(env, &refcheck);
-
-out:
-       up(&lo->lo_sem);
-       return ret;
-}
-
-static int loop_set_fd(struct lloop_device *lo, struct file *unused,
-                      struct block_device *bdev, struct file *file)
-{
-       struct inode     *inode;
-       struct address_space *mapping;
-       int                lo_flags = 0;
-       int                error;
-       loff_t          size;
-
-       if (!try_module_get(THIS_MODULE))
-               return -ENODEV;
-
-       error = -EBUSY;
-       if (lo->lo_state != LLOOP_UNBOUND)
-               goto out;
-
-       mapping = file->f_mapping;
-       inode = mapping->host;
-
-       error = -EINVAL;
-       if (!S_ISREG(inode->i_mode) || inode->i_sb->s_magic != LL_SUPER_MAGIC)
-               goto out;
-
-       if (!(file->f_mode & FMODE_WRITE))
-               lo_flags |= LO_FLAGS_READ_ONLY;
-
-       size = get_loop_size(lo, file);
-
-       if ((loff_t)(sector_t)size != size) {
-               error = -EFBIG;
-               goto out;
-       }
-
-       /* remove all pages in cache so as dirty pages not to be existent. */
-       truncate_inode_pages(mapping, 0);
-
-       set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
-
-       lo->lo_blocksize = PAGE_SIZE;
-       lo->lo_device = bdev;
-       lo->lo_flags = lo_flags;
-       lo->lo_backing_file = file;
-       lo->lo_sizelimit = 0;
-       lo->old_gfp_mask = mapping_gfp_mask(mapping);
-       mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
-
-       lo->lo_bio = lo->lo_biotail = NULL;
-
-       /*
-        * set queue make_request_fn, and add limits based on lower level
-        * device
-        */
-       blk_queue_make_request(lo->lo_queue, loop_make_request);
-       lo->lo_queue->queuedata = lo;
-
-       /* queue parameters */
-       CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
-       blk_queue_logical_block_size(lo->lo_queue,
-                                    (unsigned short)PAGE_SIZE);
-       blk_queue_max_hw_sectors(lo->lo_queue,
-                                LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
-       blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
-
-       set_capacity(disks[lo->lo_number], size);
-       bd_set_size(bdev, size << 9);
-
-       set_blocksize(bdev, lo->lo_blocksize);
-
-       kthread_run(loop_thread, lo, "lloop%d", lo->lo_number);
-       down(&lo->lo_sem);
-       return 0;
-
-out:
-       /* This is safe: open() is still holding a reference. */
-       module_put(THIS_MODULE);
-       return error;
-}
-
-static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
-                      int count)
-{
-       struct file *filp = lo->lo_backing_file;
-       gfp_t gfp = lo->old_gfp_mask;
-
-       if (lo->lo_state != LLOOP_BOUND)
-               return -ENXIO;
-
-       if (lo->lo_refcnt > count)      /* we needed one fd for the ioctl */
-               return -EBUSY;
-
-       if (!filp)
-               return -EINVAL;
-
-       spin_lock_irq(&lo->lo_lock);
-       lo->lo_state = LLOOP_RUNDOWN;
-       spin_unlock_irq(&lo->lo_lock);
-       wake_up(&lo->lo_bh_wait);
-
-       down(&lo->lo_sem);
-       lo->lo_backing_file = NULL;
-       lo->lo_device = NULL;
-       lo->lo_offset = 0;
-       lo->lo_sizelimit = 0;
-       lo->lo_flags = 0;
-       invalidate_bdev(bdev);
-       set_capacity(disks[lo->lo_number], 0);
-       bd_set_size(bdev, 0);
-       mapping_set_gfp_mask(filp->f_mapping, gfp);
-       lo->lo_state = LLOOP_UNBOUND;
-       fput(filp);
-       /* This is safe: open() is still holding a reference. */
-       module_put(THIS_MODULE);
-       return 0;
-}
-
-static int lo_open(struct block_device *bdev, fmode_t mode)
-{
-       struct lloop_device *lo = bdev->bd_disk->private_data;
-
-       mutex_lock(&lo->lo_ctl_mutex);
-       lo->lo_refcnt++;
-       mutex_unlock(&lo->lo_ctl_mutex);
-
-       return 0;
-}
-
-static void lo_release(struct gendisk *disk, fmode_t mode)
-{
-       struct lloop_device *lo = disk->private_data;
-
-       mutex_lock(&lo->lo_ctl_mutex);
-       --lo->lo_refcnt;
-       mutex_unlock(&lo->lo_ctl_mutex);
-}
-
-/* lloop device node's ioctl function. */
-static int lo_ioctl(struct block_device *bdev, fmode_t mode,
-                   unsigned int cmd, unsigned long arg)
-{
-       struct lloop_device *lo = bdev->bd_disk->private_data;
-       struct inode *inode = NULL;
-       int err = 0;
-
-       mutex_lock(&lloop_mutex);
-       switch (cmd) {
-       case LL_IOC_LLOOP_DETACH: {
-               err = loop_clr_fd(lo, bdev, 2);
-               if (err == 0)
-                       blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
-               break;
-       }
-
-       case LL_IOC_LLOOP_INFO: {
-               struct lu_fid fid;
-
-               if (!lo->lo_backing_file) {
-                       err = -ENOENT;
-                       break;
-               }
-               if (!inode)
-                       inode = file_inode(lo->lo_backing_file);
-               if (lo->lo_state == LLOOP_BOUND)
-                       fid = ll_i2info(inode)->lli_fid;
-               else
-                       fid_zero(&fid);
-
-               if (copy_to_user((void __user *)arg, &fid, sizeof(fid)))
-                       err = -EFAULT;
-               break;
-       }
-
-       default:
-               err = -EINVAL;
-               break;
-       }
-       mutex_unlock(&lloop_mutex);
-
-       return err;
-}
-
-static struct block_device_operations lo_fops = {
-       .owner =        THIS_MODULE,
-       .open =  lo_open,
-       .release =      lo_release,
-       .ioctl =        lo_ioctl,
-};
-
-/* dynamic iocontrol callback.
- * This callback is registered in lloop_init and will be called by
- * ll_iocontrol_call.
- *
- * This is a llite regular file ioctl function. It takes the responsibility
- * of attaching or detaching a file by a lloop's device number.
- */
-static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
-                                  unsigned int cmd, unsigned long arg,
-                                  void *magic, int *rcp)
-{
-       struct lloop_device *lo = NULL;
-       struct block_device *bdev = NULL;
-       int err = 0;
-       dev_t dev;
-
-       if (magic != ll_iocontrol_magic)
-               return LLIOC_CONT;
-
-       if (!disks) {
-               err = -ENODEV;
-               goto out1;
-       }
-
-       CWARN("Enter llop_ioctl\n");
-
-       mutex_lock(&lloop_mutex);
-       switch (cmd) {
-       case LL_IOC_LLOOP_ATTACH: {
-               struct lloop_device *lo_free = NULL;
-               int i;
-
-               for (i = 0; i < max_loop; i++, lo = NULL) {
-                       lo = &loop_dev[i];
-                       if (lo->lo_state == LLOOP_UNBOUND) {
-                               if (!lo_free)
-                                       lo_free = lo;
-                               continue;
-                       }
-                       if (file_inode(lo->lo_backing_file) == file_inode(file))
-                               break;
-               }
-               if (lo || !lo_free) {
-                       err = -EBUSY;
-                       goto out;
-               }
-
-               lo = lo_free;
-               dev = MKDEV(lloop_major, lo->lo_number);
-
-               /* quit if the used pointer is writable */
-               if (put_user((long)old_encode_dev(dev), (long __user *)arg)) {
-                       err = -EFAULT;
-                       goto out;
-               }
-
-               bdev = blkdev_get_by_dev(dev, file->f_mode, NULL);
-               if (IS_ERR(bdev)) {
-                       err = PTR_ERR(bdev);
-                       goto out;
-               }
-
-               get_file(file);
-               err = loop_set_fd(lo, NULL, bdev, file);
-               if (err) {
-                       fput(file);
-                       blkdev_put(bdev, 0);
-               }
-
-               break;
-       }
-
-       case LL_IOC_LLOOP_DETACH_BYDEV: {
-               int minor;
-
-               dev = old_decode_dev(arg);
-               if (MAJOR(dev) != lloop_major) {
-                       err = -EINVAL;
-                       goto out;
-               }
-
-               minor = MINOR(dev);
-               if (minor > max_loop - 1) {
-                       err = -EINVAL;
-                       goto out;
-               }
-
-               lo = &loop_dev[minor];
-               if (lo->lo_state != LLOOP_BOUND) {
-                       err = -EINVAL;
-                       goto out;
-               }
-
-               bdev = lo->lo_device;
-               err = loop_clr_fd(lo, bdev, 1);
-               if (err == 0)
-                       blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
-
-               break;
-       }
-
-       default:
-               err = -EINVAL;
-               break;
-       }
-
-out:
-       mutex_unlock(&lloop_mutex);
-out1:
-       if (rcp)
-               *rcp = err;
-       return LLIOC_STOP;
-}
-
-static int __init lloop_init(void)
-{
-       int     i;
-       unsigned int cmdlist[] = {
-               LL_IOC_LLOOP_ATTACH,
-               LL_IOC_LLOOP_DETACH_BYDEV,
-       };
-
-       if (max_loop < 1 || max_loop > 256) {
-               max_loop = MAX_LOOP_DEFAULT;
-               CWARN("lloop: invalid max_loop (must be between 1 and 256), using default (%u)\n",
-                     max_loop);
-       }
-
-       lloop_major = register_blkdev(0, "lloop");
-       if (lloop_major < 0)
-               return -EIO;
-
-       CDEBUG(D_CONFIG, "registered lloop major %d with %u minors\n",
-              lloop_major, max_loop);
-
-       ll_iocontrol_magic = ll_iocontrol_register(lloop_ioctl, 2, cmdlist);
-       if (!ll_iocontrol_magic)
-               goto out_mem1;
-
-       loop_dev = kcalloc(max_loop, sizeof(*loop_dev), GFP_KERNEL);
-       if (!loop_dev)
-               goto out_mem1;
-
-       disks = kcalloc(max_loop, sizeof(*disks), GFP_KERNEL);
-       if (!disks)
-               goto out_mem2;
-
-       for (i = 0; i < max_loop; i++) {
-               disks[i] = alloc_disk(1);
-               if (!disks[i])
-                       goto out_mem3;
-       }
-
-       mutex_init(&lloop_mutex);
-
-       for (i = 0; i < max_loop; i++) {
-               struct lloop_device *lo = &loop_dev[i];
-               struct gendisk *disk = disks[i];
-
-               lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
-               if (!lo->lo_queue)
-                       goto out_mem4;
-
-               mutex_init(&lo->lo_ctl_mutex);
-               sema_init(&lo->lo_sem, 0);
-               init_waitqueue_head(&lo->lo_bh_wait);
-               lo->lo_number = i;
-               spin_lock_init(&lo->lo_lock);
-               disk->major = lloop_major;
-               disk->first_minor = i;
-               disk->fops = &lo_fops;
-               sprintf(disk->disk_name, "lloop%d", i);
-               disk->private_data = lo;
-               disk->queue = lo->lo_queue;
-       }
-
-       /* We cannot fail after we call this, so another loop!*/
-       for (i = 0; i < max_loop; i++)
-               add_disk(disks[i]);
-       return 0;
-
-out_mem4:
-       while (i--)
-               blk_cleanup_queue(loop_dev[i].lo_queue);
-       i = max_loop;
-out_mem3:
-       while (i--)
-               put_disk(disks[i]);
-       kfree(disks);
-out_mem2:
-       kfree(loop_dev);
-out_mem1:
-       unregister_blkdev(lloop_major, "lloop");
-       ll_iocontrol_unregister(ll_iocontrol_magic);
-       CERROR("lloop: ran out of memory\n");
-       return -ENOMEM;
-}
-
-static void lloop_exit(void)
-{
-       int i;
-
-       ll_iocontrol_unregister(ll_iocontrol_magic);
-       for (i = 0; i < max_loop; i++) {
-               del_gendisk(disks[i]);
-               blk_cleanup_queue(loop_dev[i].lo_queue);
-               put_disk(disks[i]);
-       }
-
-       unregister_blkdev(lloop_major, "lloop");
-
-       kfree(disks);
-       kfree(loop_dev);
-}
-
-module_param(max_loop, int, 0444);
-MODULE_PARM_DESC(max_loop, "maximum of lloop_device");
-MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre virtual block device");
-MODULE_VERSION(LUSTRE_VERSION_STRING);
-MODULE_LICENSE("GPL");
-
-module_init(lloop_init);
-module_exit(lloop_exit);
index 55d62eb11957deb36b34116a4bcde0c2682b2047..e86bf3c53be3722e076a67c9314994902e280456 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -180,11 +176,7 @@ LUSTRE_RO_ATTR(filesfree);
 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
                                char *buf)
 {
-       struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
-                                             ll_kobj);
-
-       return sprintf(buf, "%s client\n",
-                       sbi->ll_flags & LL_SBI_RMT_CLIENT ? "remote" : "local");
+       return sprintf(buf, "local client\n");
 }
 LUSTRE_RO_ATTR(client_type);
 
@@ -364,7 +356,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
 {
        struct super_block     *sb    = m->private;
        struct ll_sb_info      *sbi   = ll_s2sbi(sb);
-       struct cl_client_cache *cache = &sbi->ll_cache;
+       struct cl_client_cache *cache = sbi->ll_cache;
        int shift = 20 - PAGE_SHIFT;
        int max_cached_mb;
        int unused_mb;
@@ -391,7 +383,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
 {
        struct super_block *sb = ((struct seq_file *)file->private_data)->private;
        struct ll_sb_info *sbi = ll_s2sbi(sb);
-       struct cl_client_cache *cache = &sbi->ll_cache;
+       struct cl_client_cache *cache = sbi->ll_cache;
        struct lu_env *env;
        int refcheck;
        int mult, rc, pages_number;
@@ -830,7 +822,7 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
 {
        struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
                                              ll_kobj);
-       struct cl_client_cache *cache = &sbi->ll_cache;
+       struct cl_client_cache *cache = sbi->ll_cache;
        int pages, mb;
 
        pages = atomic_read(&cache->ccc_unstable_nr);
index 5eba0ebae10f1be9cbe2ddddf29caaa4297666ce..3664bfd0178b34b09c1791cfd1190f6b9c035ea2 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -144,7 +140,7 @@ static void ll_invalidate_negative_children(struct inode *dir)
 {
        struct dentry *dentry, *tmp_subdir;
 
-       ll_lock_dcache(dir);
+       spin_lock(&dir->i_lock);
        hlist_for_each_entry(dentry, &dir->i_dentry, d_u.d_alias) {
                spin_lock(&dentry->d_lock);
                if (!list_empty(&dentry->d_subdirs)) {
@@ -159,7 +155,7 @@ static void ll_invalidate_negative_children(struct inode *dir)
                }
                spin_unlock(&dentry->d_lock);
        }
-       ll_unlock_dcache(dir);
+       spin_unlock(&dir->i_lock);
 }
 
 int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
@@ -318,9 +314,10 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
        if (hlist_empty(&inode->i_dentry))
                return NULL;
 
-       discon_alias = invalid_alias = NULL;
+       discon_alias = NULL;
+       invalid_alias = NULL;
 
-       ll_lock_dcache(inode);
+       spin_lock(&inode->i_lock);
        hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
                LASSERT(alias != dentry);
 
@@ -345,7 +342,7 @@ static struct dentry *ll_find_alias(struct inode *inode, struct dentry *dentry)
                dget_dlock(alias);
                spin_unlock(&alias->d_lock);
        }
-       ll_unlock_dcache(inode);
+       spin_unlock(&inode->i_lock);
 
        return alias;
 }
@@ -396,7 +393,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
         * when I return
         */
        CDEBUG(D_DENTRY, "it %p it_disposition %x\n", it,
-              it->d.lustre.it_disposition);
+              it->it_disposition);
        if (!it_disposition(it, DISP_LOOKUP_NEG)) {
                rc = ll_prep_inode(&inode, request, (*de)->d_sb, it);
                if (rc)
@@ -448,7 +445,7 @@ static int ll_lookup_it_finish(struct ptlrpc_request *request,
                /* Check that parent has UPDATE lock. */
                struct lookup_intent parent_it = {
                                        .it_op = IT_GETATTR,
-                                       .d.lustre.it_lock_handle = 0 };
+                                       .it_lock_handle = 0 };
 
                if (md_revalidate_lock(ll_i2mdexp(parent), &parent_it,
                                       &ll_i2info(parent)->lli_fid, NULL)) {
@@ -625,13 +622,10 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
                if (d_really_is_positive(dentry) && it_disposition(it, DISP_OPEN_OPEN)) {
                        /* Open dentry. */
                        if (S_ISFIFO(d_inode(dentry)->i_mode)) {
-                               /* We cannot call open here as it would
-                                * deadlock.
+                               /* We cannot call open here as it might
+                                * deadlock. This case is unreachable in
+                                * practice because of OBD_CONNECT_NODEVOH.
                                 */
-                               if (it_disposition(it, DISP_ENQ_OPEN_REF))
-                                       ptlrpc_req_finished(
-                                                      (struct ptlrpc_request *)
-                                                         it->d.lustre.it_data);
                                rc = finish_no_open(file, de);
                        } else {
                                file->private_data = it;
@@ -662,10 +656,10 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
        struct ll_sb_info *sbi = ll_i2sbi(dir);
        int rc;
 
-       LASSERT(it && it->d.lustre.it_disposition);
+       LASSERT(it && it->it_disposition);
 
        LASSERT(it_disposition(it, DISP_ENQ_CREATE_REF));
-       request = it->d.lustre.it_data;
+       request = it->it_request;
        it_clear_disposition(it, DISP_ENQ_CREATE_REF);
        rc = ll_prep_inode(&inode, request, dir->i_sb, it);
        if (rc) {
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
deleted file mode 100644 (file)
index e9d2531..0000000
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/llite/remote_perm.c
- *
- * Lustre Permission Cache for Remote Client
- *
- * Author: Lai Siyao <lsy@clusterfs.com>
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include <linux/module.h>
-#include <linux/types.h>
-
-#include "../include/lustre_lite.h"
-#include "../include/lustre_ha.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lprocfs_status.h"
-#include "../include/lustre_disk.h"
-#include "../include/lustre_param.h"
-#include "llite_internal.h"
-
-struct kmem_cache *ll_remote_perm_cachep;
-struct kmem_cache *ll_rmtperm_hash_cachep;
-
-static inline struct ll_remote_perm *alloc_ll_remote_perm(void)
-{
-       struct ll_remote_perm *lrp;
-
-       lrp = kmem_cache_zalloc(ll_remote_perm_cachep, GFP_KERNEL);
-       if (lrp)
-               INIT_HLIST_NODE(&lrp->lrp_list);
-       return lrp;
-}
-
-static inline void free_ll_remote_perm(struct ll_remote_perm *lrp)
-{
-       if (!lrp)
-               return;
-
-       if (!hlist_unhashed(&lrp->lrp_list))
-               hlist_del(&lrp->lrp_list);
-       kmem_cache_free(ll_remote_perm_cachep, lrp);
-}
-
-static struct hlist_head *alloc_rmtperm_hash(void)
-{
-       struct hlist_head *hash;
-       int i;
-
-       hash = kmem_cache_zalloc(ll_rmtperm_hash_cachep, GFP_NOFS);
-       if (!hash)
-               return NULL;
-
-       for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
-               INIT_HLIST_HEAD(hash + i);
-
-       return hash;
-}
-
-void free_rmtperm_hash(struct hlist_head *hash)
-{
-       int i;
-       struct ll_remote_perm *lrp;
-       struct hlist_node *next;
-
-       if (!hash)
-               return;
-
-       for (i = 0; i < REMOTE_PERM_HASHSIZE; i++)
-               hlist_for_each_entry_safe(lrp, next, hash + i, lrp_list)
-                       free_ll_remote_perm(lrp);
-       kmem_cache_free(ll_rmtperm_hash_cachep, hash);
-}
-
-static inline int remote_perm_hashfunc(uid_t uid)
-{
-       return uid & (REMOTE_PERM_HASHSIZE - 1);
-}
-
-/* NB: setxid permission is not checked here, instead it's done on
- * MDT when client get remote permission.
- */
-static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
-{
-       struct hlist_head *head;
-       struct ll_remote_perm *lrp;
-       int found = 0, rc;
-
-       if (!lli->lli_remote_perms)
-               return -ENOENT;
-
-       head = lli->lli_remote_perms +
-               remote_perm_hashfunc(from_kuid(&init_user_ns, current_uid()));
-
-       spin_lock(&lli->lli_lock);
-       hlist_for_each_entry(lrp, head, lrp_list) {
-               if (lrp->lrp_uid != from_kuid(&init_user_ns, current_uid()))
-                       continue;
-               if (lrp->lrp_gid != from_kgid(&init_user_ns, current_gid()))
-                       continue;
-               if (lrp->lrp_fsuid != from_kuid(&init_user_ns, current_fsuid()))
-                       continue;
-               if (lrp->lrp_fsgid != from_kgid(&init_user_ns, current_fsgid()))
-                       continue;
-               found = 1;
-               break;
-       }
-
-       if (!found) {
-               rc = -ENOENT;
-               goto out;
-       }
-
-       CDEBUG(D_SEC, "found remote perm: %u/%u/%u/%u - %#x\n",
-              lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
-              lrp->lrp_access_perm);
-       rc = ((lrp->lrp_access_perm & mask) == mask) ? 0 : -EACCES;
-
-out:
-       spin_unlock(&lli->lli_lock);
-       return rc;
-}
-
-int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
-{
-       struct ll_inode_info *lli = ll_i2info(inode);
-       struct ll_remote_perm *lrp = NULL, *tmp = NULL;
-       struct hlist_head *head, *perm_hash = NULL;
-
-       LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
-
-#if 0
-       if (perm->rp_uid != current->uid ||
-           perm->rp_gid != current->gid ||
-           perm->rp_fsuid != current->fsuid ||
-           perm->rp_fsgid != current->fsgid) {
-               /* user might setxid in this small period */
-               CDEBUG(D_SEC,
-                      "remote perm user %u/%u/%u/%u != current %u/%u/%u/%u\n",
-                      perm->rp_uid, perm->rp_gid, perm->rp_fsuid,
-                      perm->rp_fsgid, current->uid, current->gid,
-                      current->fsuid, current->fsgid);
-               return -EAGAIN;
-       }
-#endif
-
-       if (!lli->lli_remote_perms) {
-               perm_hash = alloc_rmtperm_hash();
-               if (!perm_hash) {
-                       CERROR("alloc lli_remote_perms failed!\n");
-                       return -ENOMEM;
-               }
-       }
-
-       spin_lock(&lli->lli_lock);
-
-       if (!lli->lli_remote_perms)
-               lli->lli_remote_perms = perm_hash;
-       else
-               free_rmtperm_hash(perm_hash);
-
-       head = lli->lli_remote_perms + remote_perm_hashfunc(perm->rp_uid);
-
-again:
-       hlist_for_each_entry(tmp, head, lrp_list) {
-               if (tmp->lrp_uid != perm->rp_uid)
-                       continue;
-               if (tmp->lrp_gid != perm->rp_gid)
-                       continue;
-               if (tmp->lrp_fsuid != perm->rp_fsuid)
-                       continue;
-               if (tmp->lrp_fsgid != perm->rp_fsgid)
-                       continue;
-               free_ll_remote_perm(lrp);
-               lrp = tmp;
-               break;
-       }
-
-       if (!lrp) {
-               spin_unlock(&lli->lli_lock);
-               lrp = alloc_ll_remote_perm();
-               if (!lrp) {
-                       CERROR("alloc memory for ll_remote_perm failed!\n");
-                       return -ENOMEM;
-               }
-               spin_lock(&lli->lli_lock);
-               goto again;
-       }
-
-       lrp->lrp_access_perm = perm->rp_access_perm;
-       if (lrp != tmp) {
-               lrp->lrp_uid     = perm->rp_uid;
-               lrp->lrp_gid     = perm->rp_gid;
-               lrp->lrp_fsuid       = perm->rp_fsuid;
-               lrp->lrp_fsgid       = perm->rp_fsgid;
-               hlist_add_head(&lrp->lrp_list, head);
-       }
-       lli->lli_rmtperm_time = cfs_time_current();
-       spin_unlock(&lli->lli_lock);
-
-       CDEBUG(D_SEC, "new remote perm@%p: %u/%u/%u/%u - %#x\n",
-              lrp, lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
-              lrp->lrp_access_perm);
-
-       return 0;
-}
-
-int lustre_check_remote_perm(struct inode *inode, int mask)
-{
-       struct ll_inode_info *lli = ll_i2info(inode);
-       struct ll_sb_info *sbi = ll_i2sbi(inode);
-       struct ptlrpc_request *req = NULL;
-       struct mdt_remote_perm *perm;
-       unsigned long save;
-       int i = 0, rc;
-
-       do {
-               save = lli->lli_rmtperm_time;
-               rc = do_check_remote_perm(lli, mask);
-               if (!rc || (rc != -ENOENT && i))
-                       break;
-
-               might_sleep();
-
-               mutex_lock(&lli->lli_rmtperm_mutex);
-               /* check again */
-               if (save != lli->lli_rmtperm_time) {
-                       rc = do_check_remote_perm(lli, mask);
-                       if (!rc || (rc != -ENOENT && i)) {
-                               mutex_unlock(&lli->lli_rmtperm_mutex);
-                               break;
-                       }
-               }
-
-               if (i++ > 5) {
-                       CERROR("check remote perm falls in dead loop!\n");
-                       LBUG();
-               }
-
-               rc = md_get_remote_perm(sbi->ll_md_exp, ll_inode2fid(inode),
-                                       ll_i2suppgid(inode), &req);
-               if (rc) {
-                       mutex_unlock(&lli->lli_rmtperm_mutex);
-                       break;
-               }
-
-               perm = req_capsule_server_swab_get(&req->rq_pill, &RMF_ACL,
-                                                  lustre_swab_mdt_remote_perm);
-               if (unlikely(!perm)) {
-                       mutex_unlock(&lli->lli_rmtperm_mutex);
-                       rc = -EPROTO;
-                       break;
-               }
-
-               rc = ll_update_remote_perm(inode, perm);
-               mutex_unlock(&lli->lli_rmtperm_mutex);
-               if (rc == -ENOMEM)
-                       break;
-
-               ptlrpc_req_finished(req);
-               req = NULL;
-       } while (1);
-       ptlrpc_req_finished(req);
-       return rc;
-}
-
-#if 0  /* NB: remote perms can't be freed in ll_mdc_blocking_ast of UPDATE lock,
-       * because it will fail sanity test 48.
-       */
-void ll_free_remote_perms(struct inode *inode)
-{
-       struct ll_inode_info *lli = ll_i2info(inode);
-       struct hlist_head *hash = lli->lli_remote_perms;
-       struct ll_remote_perm *lrp;
-       struct hlist_node *node, *next;
-       int i;
-
-       LASSERT(hash);
-
-       spin_lock(&lli->lli_lock);
-
-       for (i = 0; i < REMOTE_PERM_HASHSIZE; i++) {
-               hlist_for_each_entry_safe(lrp, node, next, hash + i, lrp_list)
-                       free_ll_remote_perm(lrp);
-       }
-
-       spin_unlock(&lli->lli_lock);
-}
-#endif
index 336397773fbb5d911c54081624405818237f13fe..87393c4bd51e2a033aec7cf9d022c57831f92ee0 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
 #include "llite_internal.h"
 #include "../include/linux/lustre_compat25.h"
 
-/**
- * Finalizes cl-data before exiting typical address_space operation. Dual to
- * ll_cl_init().
- */
-void ll_cl_fini(struct ll_cl_context *lcc)
-{
-       struct lu_env  *env  = lcc->lcc_env;
-       struct cl_io   *io   = lcc->lcc_io;
-       struct cl_page *page = lcc->lcc_page;
-
-       LASSERT(lcc->lcc_cookie == current);
-       LASSERT(env);
-
-       if (page) {
-               lu_ref_del(&page->cp_reference, "cl_io", io);
-               cl_page_put(env, page);
-       }
-
-       cl_env_put(env, &lcc->lcc_refcheck);
-}
-
-/**
- * Initializes common cl-data at the typical address_space operation entry
- * point.
- */
-struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
-{
-       struct ll_cl_context *lcc;
-       struct lu_env    *env;
-       struct cl_io     *io;
-       struct cl_object *clob;
-       struct vvp_io    *vio;
-
-       int refcheck;
-       int result = 0;
-
-       clob = ll_i2info(file_inode(file))->lli_clob;
-       LASSERT(clob);
-
-       env = cl_env_get(&refcheck);
-       if (IS_ERR(env))
-               return ERR_CAST(env);
-
-       lcc = &ll_env_info(env)->lti_io_ctx;
-       memset(lcc, 0, sizeof(*lcc));
-       lcc->lcc_env = env;
-       lcc->lcc_refcheck = refcheck;
-       lcc->lcc_cookie = current;
-
-       vio = vvp_env_io(env);
-       io = vio->vui_cl.cis_io;
-       lcc->lcc_io = io;
-       if (!io)
-               result = -EIO;
-
-       if (result == 0 && vmpage) {
-               struct cl_page   *page;
-
-               LASSERT(io->ci_state == CIS_IO_GOING);
-               LASSERT(vio->vui_fd == LUSTRE_FPRIVATE(file));
-               page = cl_page_find(env, clob, vmpage->index, vmpage,
-                                   CPT_CACHEABLE);
-               if (!IS_ERR(page)) {
-                       lcc->lcc_page = page;
-                       lu_ref_add(&page->cp_reference, "cl_io", io);
-                       result = 0;
-               } else {
-                       result = PTR_ERR(page);
-               }
-       }
-       if (result) {
-               ll_cl_fini(lcc);
-               lcc = ERR_PTR(result);
-       }
-
-       return lcc;
-}
-
 static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
 
 /**
@@ -1112,17 +1030,70 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
        return result;
 }
 
+struct ll_cl_context *ll_cl_find(struct file *file)
+{
+       struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+       struct ll_cl_context *lcc;
+       struct ll_cl_context *found = NULL;
+
+       read_lock(&fd->fd_lock);
+       list_for_each_entry(lcc, &fd->fd_lccs, lcc_list) {
+               if (lcc->lcc_cookie == current) {
+                       found = lcc;
+                       break;
+               }
+       }
+       read_unlock(&fd->fd_lock);
+
+       return found;
+}
+
+void ll_cl_add(struct file *file, const struct lu_env *env, struct cl_io *io)
+{
+       struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+       struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
+
+       memset(lcc, 0, sizeof(*lcc));
+       INIT_LIST_HEAD(&lcc->lcc_list);
+       lcc->lcc_cookie = current;
+       lcc->lcc_env = env;
+       lcc->lcc_io = io;
+
+       write_lock(&fd->fd_lock);
+       list_add(&lcc->lcc_list, &fd->fd_lccs);
+       write_unlock(&fd->fd_lock);
+}
+
+void ll_cl_remove(struct file *file, const struct lu_env *env)
+{
+       struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+       struct ll_cl_context *lcc = &ll_env_info(env)->lti_io_ctx;
+
+       write_lock(&fd->fd_lock);
+       list_del_init(&lcc->lcc_list);
+       write_unlock(&fd->fd_lock);
+}
+
 int ll_readpage(struct file *file, struct page *vmpage)
 {
+       struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob;
        struct ll_cl_context *lcc;
+       const struct lu_env  *env;
+       struct cl_io   *io;
+       struct cl_page *page;
        int result;
 
-       lcc = ll_cl_init(file, vmpage);
-       if (!IS_ERR(lcc)) {
-               struct lu_env  *env  = lcc->lcc_env;
-               struct cl_io   *io   = lcc->lcc_io;
-               struct cl_page *page = lcc->lcc_page;
+       lcc = ll_cl_find(file);
+       if (!lcc) {
+               unlock_page(vmpage);
+               return -EIO;
+       }
 
+       env = lcc->lcc_env;
+       io = lcc->lcc_io;
+       LASSERT(io->ci_state == CIS_IO_GOING);
+       page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+       if (!IS_ERR(page)) {
                LASSERT(page->cp_type == CPT_CACHEABLE);
                if (likely(!PageUptodate(vmpage))) {
                        cl_page_assume(env, io, page);
@@ -1132,10 +1103,10 @@ int ll_readpage(struct file *file, struct page *vmpage)
                        unlock_page(vmpage);
                        result = 0;
                }
-               ll_cl_fini(lcc);
+               cl_page_put(env, page);
        } else {
                unlock_page(vmpage);
-               result = PTR_ERR(lcc);
+               result = PTR_ERR(page);
        }
        return result;
 }
index c12a048fce59c69430448f368b726a8b3bf5718d..d98c7acc0832518287a3bccee2b5c36482ed55b7 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -489,7 +485,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
                          struct page **pagep, void **fsdata)
 {
        struct ll_cl_context *lcc;
-       struct lu_env  *env;
+       const struct lu_env  *env;
        struct cl_io   *io;
        struct cl_page *page;
        struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
@@ -501,9 +497,9 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
 
        CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);
 
-       lcc = ll_cl_init(file, NULL);
-       if (IS_ERR(lcc)) {
-               result = PTR_ERR(lcc);
+       lcc = ll_cl_find(file);
+       if (!lcc) {
+               result = -EIO;
                goto out;
        }
 
@@ -579,8 +575,6 @@ out:
                        unlock_page(vmpage);
                        put_page(vmpage);
                }
-               if (!IS_ERR(lcc))
-                       ll_cl_fini(lcc);
        } else {
                *pagep = vmpage;
                *fsdata = lcc;
@@ -593,7 +587,7 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
                        struct page *vmpage, void *fsdata)
 {
        struct ll_cl_context *lcc = fsdata;
-       struct lu_env *env;
+       const struct lu_env *env;
        struct cl_io *io;
        struct vvp_io *vio;
        struct cl_page *page;
@@ -631,6 +625,10 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
        } else {
                cl_page_disown(env, io, page);
 
+               lcc->lcc_page = NULL;
+               lu_ref_del(&page->cp_reference, "cl_io", io);
+               cl_page_put(env, page);
+
                /* page list is not contiguous now, commit it now */
                unplug = true;
        }
@@ -639,7 +637,6 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
            file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
                result = vvp_io_write_commit(env, io);
 
-       ll_cl_fini(lcc);
        return result >= 0 ? copied : result;
 }
 
index 6322f88661e8c536c7a45ebfd6e03346590bd005..f77524294c27cb028f85999953dff15e065688ad 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -650,7 +646,7 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
                }
        }
 
-       it->d.lustre.it_lock_handle = entry->se_handle;
+       it->it_lock_handle = entry->se_handle;
        rc = md_revalidate_lock(ll_i2mdexp(dir), it, ll_inode2fid(dir), NULL);
        if (rc != 1) {
                rc = -EAGAIN;
@@ -704,7 +700,7 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
                 * process enqueues lock on child with parent lock held, eg.
                 * unlink.
                 */
-               handle = it->d.lustre.it_lock_handle;
+               handle = it->it_lock_handle;
                ll_intent_drop_lock(it);
        }
 
@@ -854,7 +850,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
 {
        struct inode         *inode = d_inode(dentry);
        struct lookup_intent      it = { .it_op = IT_GETATTR,
-                                        .d.lustre.it_lock_handle = 0 };
+                                        .it_lock_handle = 0 };
        struct md_enqueue_info   *minfo;
        struct ldlm_enqueue_info *einfo;
        int rc;
@@ -869,7 +865,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
        rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),
                                NULL);
        if (rc == 1) {
-               entry->se_handle = it.d.lustre.it_lock_handle;
+               entry->se_handle = it.it_lock_handle;
                ll_intent_release(&it);
                return 1;
        }
@@ -1573,7 +1569,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
                if (entry->se_stat == SA_ENTRY_SUCC && entry->se_inode) {
                        struct inode *inode = entry->se_inode;
                        struct lookup_intent it = { .it_op = IT_GETATTR,
-                                                   .d.lustre.it_lock_handle =
+                                                   .it_lock_handle =
                                                     entry->se_handle };
                        __u64 bits;
 
index 415750b0bff4618c0f70c956fb972a11147272b8..3dd7e0eb0b54a1e8de55ab256d921c1ee8745549 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -118,19 +114,6 @@ static int __init lustre_init(void)
        if (!ll_file_data_slab)
                goto out_cache;
 
-       ll_remote_perm_cachep = kmem_cache_create("ll_remote_perm_cache",
-                                                 sizeof(struct ll_remote_perm),
-                                                     0, 0, NULL);
-       if (!ll_remote_perm_cachep)
-               goto out_cache;
-
-       ll_rmtperm_hash_cachep = kmem_cache_create("ll_rmtperm_hash_cache",
-                                                  REMOTE_PERM_HASHSIZE *
-                                                  sizeof(struct list_head),
-                                                  0, 0, NULL);
-       if (!ll_rmtperm_hash_cachep)
-               goto out_cache;
-
        llite_root = debugfs_create_dir("llite", debugfs_lustre_root);
        if (IS_ERR_OR_NULL(llite_root)) {
                rc = llite_root ? PTR_ERR(llite_root) : -ENOMEM;
@@ -194,8 +177,6 @@ out_debugfs:
 out_cache:
        kmem_cache_destroy(ll_inode_cachep);
        kmem_cache_destroy(ll_file_data_slab);
-       kmem_cache_destroy(ll_remote_perm_cachep);
-       kmem_cache_destroy(ll_rmtperm_hash_cachep);
        return rc;
 }
 
@@ -213,10 +194,6 @@ static void __exit lustre_exit(void)
        vvp_global_fini();
 
        kmem_cache_destroy(ll_inode_cachep);
-       kmem_cache_destroy(ll_rmtperm_hash_cachep);
-
-       kmem_cache_destroy(ll_remote_perm_cachep);
-
        kmem_cache_destroy(ll_file_data_slab);
 }
 
index 3fc736ccf85e7def5c747fffc9a250b6154d3339..8c8bdfe1ad71c1257e01d1e15bdaff748823b554 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 47101de1c020eb69a7619b5273ab2c5a4c27a6a7..e623216e962dd2a5035928013904a455894168f9 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -150,8 +146,8 @@ struct lu_context_key vvp_session_key = {
        .lct_fini = vvp_session_key_fini
 };
 
-void *vvp_thread_key_init(const struct lu_context *ctx,
-                         struct lu_context_key *key)
+static void *vvp_thread_key_init(const struct lu_context *ctx,
+                                struct lu_context_key *key)
 {
        struct vvp_thread_info *vti;
 
@@ -161,8 +157,8 @@ void *vvp_thread_key_init(const struct lu_context *ctx,
        return vti;
 }
 
-void vvp_thread_key_fini(const struct lu_context *ctx,
-                        struct lu_context_key *key, void *data)
+static void vvp_thread_key_fini(const struct lu_context *ctx,
+                               struct lu_context_key *key, void *data)
 {
        struct vvp_thread_info *vti = data;
 
@@ -564,7 +560,7 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
 
        env = cl_env_get(&refcheck);
        if (!IS_ERR(env)) {
-               pos = *(loff_t *) v;
+               pos = *(loff_t *)v;
                vvp_pgcache_id_unpack(pos, &id);
                sbi = f->private;
                clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
index 27b9b0a01f327ebd3c5e823c7912b02a27b860ae..79fc428461ededdb0669d8c7c6ddcc3bcc43869c 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 5bf9592ae5d264e37bdfe82762da756522fd0a17..94916dcc6caabd3bd079081a1cb0662070037270 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -47,8 +43,8 @@
 #include "llite_internal.h"
 #include "vvp_internal.h"
 
-struct vvp_io *cl2vvp_io(const struct lu_env *env,
-                        const struct cl_io_slice *slice)
+static struct vvp_io *cl2vvp_io(const struct lu_env *env,
+                               const struct cl_io_slice *slice)
 {
        struct vvp_io *vio;
 
@@ -954,7 +950,8 @@ static int vvp_io_write_start(const struct lu_env *env,
                 * out-of-order writes.
                 */
                ll_merge_attr(env, inode);
-               pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
+               pos = i_size_read(inode);
+               io->u.ci_wr.wr.crw_pos = pos;
                vio->vui_iocb->ki_pos = pos;
        } else {
                LASSERT(vio->vui_iocb->ki_pos == pos);
@@ -1259,7 +1256,7 @@ static int vvp_io_read_page(const struct lu_env *env,
        return 0;
 }
 
-void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
+static void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
 {
        CLOBINVRNT(env, ios->cis_io->ci_obj,
                   vvp_object_invariant(ios->cis_io->ci_obj));
index f5bd6c22e11287aa924fb713a6292f18c054ec0c..64be0c9df35b12258ac60feaebc0ed7275e4c8d0 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 18c9df7ebddaa28903ec137172f7b56e34957fab..2c520b0bf6caa9df6477a850ca3f109b8c4409d4 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 6cd2af7a958fa3ef113159ec2482e098402fde8b..2e566d90bb94d53f2b121f275e6cbe57acafd122 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index fb886291a4e2366b68d80df59718a7f3b1cf08a7..9fe9d6c0a7d1499de1ba063a727b84c7663c0e44 100644 (file)
@@ -60,10 +60,10 @@ static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
  *    - o_ioepoch,
  *
  */
-void vvp_req_attr_set(const struct lu_env *env,
-                     const struct cl_req_slice *slice,
-                     const struct cl_object *obj,
-                     struct cl_req_attr *attr, u64 flags)
+static void vvp_req_attr_set(const struct lu_env *env,
+                            const struct cl_req_slice *slice,
+                            const struct cl_object *obj,
+                            struct cl_req_attr *attr, u64 flags)
 {
        struct inode *inode;
        struct obdo  *oa;
@@ -87,8 +87,8 @@ void vvp_req_attr_set(const struct lu_env *env,
               JOBSTATS_JOBID_SIZE);
 }
 
-void vvp_req_completion(const struct lu_env *env,
-                       const struct cl_req_slice *slice, int ioret)
+static void vvp_req_completion(const struct lu_env *env,
+                              const struct cl_req_slice *slice, int ioret)
 {
        struct vvp_req *vrq;
 
index 608014b0dbcd6e33415680349e73fd6dbff54e03..98303cf8581555e0c553dc025c5cbafea5bd7220 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -111,11 +107,6 @@ int ll_setxattr_common(struct inode *inode, const char *name,
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        struct ptlrpc_request *req = NULL;
        int xattr_type, rc;
-#ifdef CONFIG_FS_POSIX_ACL
-       struct rmtacl_ctl_entry *rce = NULL;
-       posix_acl_xattr_header *new_value = NULL;
-       ext_acl_xattr_header *acl = NULL;
-#endif
        const char *pv = value;
 
        xattr_type = get_xattr_type(name);
@@ -143,62 +134,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
            strcmp(name, "security.selinux") == 0)
                return -EOPNOTSUPP;
 
-#ifdef CONFIG_FS_POSIX_ACL
-       if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
-           (xattr_type == XATTR_ACL_ACCESS_T ||
-           xattr_type == XATTR_ACL_DEFAULT_T)) {
-               rce = rct_search(&sbi->ll_rct, current_pid());
-               if (!rce ||
-                   (rce->rce_ops != RMT_LSETFACL &&
-                   rce->rce_ops != RMT_RSETFACL))
-                       return -EOPNOTSUPP;
-
-               if (rce->rce_ops == RMT_LSETFACL) {
-                       struct eacl_entry *ee;
-
-                       ee = et_search_del(&sbi->ll_et, current_pid(),
-                                          ll_inode2fid(inode), xattr_type);
-                       if (valid & OBD_MD_FLXATTR) {
-                               acl = lustre_acl_xattr_merge2ext(
-                                               (posix_acl_xattr_header *)value,
-                                               size, ee->ee_acl);
-                               if (IS_ERR(acl)) {
-                                       ee_free(ee);
-                                       return PTR_ERR(acl);
-                               }
-                               size =  CFS_ACL_XATTR_SIZE(\
-                                               le32_to_cpu(acl->a_count), \
-                                               ext_acl_xattr);
-                               pv = (const char *)acl;
-                       }
-                       ee_free(ee);
-               } else if (rce->rce_ops == RMT_RSETFACL) {
-                       rc = lustre_posix_acl_xattr_filter(
-                                               (posix_acl_xattr_header *)value,
-                                               size, &new_value);
-                       if (unlikely(rc < 0))
-                               return rc;
-                       size = rc;
-
-                       pv = (const char *)new_value;
-               } else {
-                       return -EOPNOTSUPP;
-               }
-
-               valid |= rce_ops2valid(rce->rce_ops);
-       }
-#endif
        rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
                         valid, name, pv, size, 0, flags,
                         ll_i2suppgid(inode), &req);
-#ifdef CONFIG_FS_POSIX_ACL
-       /*
-        * Release the posix ACL space.
-        */
-       kfree(new_value);
-       if (acl)
-               lustre_ext_acl_xattr_free(acl);
-#endif
        if (rc) {
                if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
                        LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
@@ -288,7 +226,6 @@ int ll_getxattr_common(struct inode *inode, const char *name,
        struct mdt_body *body;
        int xattr_type, rc;
        void *xdata;
-       struct rmtacl_ctl_entry *rce = NULL;
        struct ll_inode_info *lli = ll_i2info(inode);
 
        CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
@@ -319,24 +256,11 @@ int ll_getxattr_common(struct inode *inode, const char *name,
                return -EOPNOTSUPP;
 
 #ifdef CONFIG_FS_POSIX_ACL
-       if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
-           (xattr_type == XATTR_ACL_ACCESS_T ||
-           xattr_type == XATTR_ACL_DEFAULT_T)) {
-               rce = rct_search(&sbi->ll_rct, current_pid());
-               if (!rce ||
-                   (rce->rce_ops != RMT_LSETFACL &&
-                   rce->rce_ops != RMT_LGETFACL &&
-                   rce->rce_ops != RMT_RSETFACL &&
-                   rce->rce_ops != RMT_RGETFACL))
-                       return -EOPNOTSUPP;
-       }
-
        /* posix acl is under protection of LOOKUP lock. when calling to this,
         * we just have path resolution to the target inode, so we have great
         * chance that cached ACL is uptodate.
         */
-       if (xattr_type == XATTR_ACL_ACCESS_T &&
-           !(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
+       if (xattr_type == XATTR_ACL_ACCESS_T) {
                struct posix_acl *acl;
 
                spin_lock(&lli->lli_lock);
@@ -378,9 +302,7 @@ do_getxattr:
        } else {
 getxattr_nocache:
                rc = md_getxattr(sbi->ll_md_exp, ll_inode2fid(inode),
-                               valid | (rce ? rce_ops2valid(rce->rce_ops) : 0),
-                               name, NULL, 0, size, 0, &req);
-
+                                valid, name, NULL, 0, size, 0, &req);
                if (rc < 0)
                        goto out_xattr;
 
@@ -417,25 +339,6 @@ getxattr_nocache:
                rc = body->eadatasize;
        }
 
-#ifdef CONFIG_FS_POSIX_ACL
-       if (rce && rce->rce_ops == RMT_LSETFACL) {
-               ext_acl_xattr_header *acl;
-
-               acl = lustre_posix_acl_xattr_2ext(buffer, rc);
-               if (IS_ERR(acl)) {
-                       rc = PTR_ERR(acl);
-                       goto out;
-               }
-
-               rc = ee_add(&sbi->ll_et, current_pid(), ll_inode2fid(inode),
-                           xattr_type, acl);
-               if (unlikely(rc < 0)) {
-                       lustre_ext_acl_xattr_free(acl);
-                       goto out;
-               }
-       }
-#endif
-
 out_xattr:
        if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
                LCONSOLE_INFO(
index d7e17abbe361d1b2d7e9a78771758b8f128de903..8089da8143d9c6d2643b63b3e2c8da6a85c418ac 100644 (file)
@@ -288,8 +288,8 @@ static int ll_xattr_find_get_lock(struct inode *inode,
                                       LCK_PR);
                if (mode != 0) {
                        /* fake oit in mdc_revalidate_lock() manner */
-                       oit->d.lustre.it_lock_handle = lockh.cookie;
-                       oit->d.lustre.it_lock_mode = mode;
+                       oit->it_lock_handle = lockh.cookie;
+                       oit->it_lock_mode = mode;
                        goto out;
                }
        }
@@ -315,7 +315,7 @@ static int ll_xattr_find_get_lock(struct inode *inode,
                return rc;
        }
 
-       *req = (struct ptlrpc_request *)oit->d.lustre.it_data;
+       *req = oit->it_request;
 out:
        down_write(&lli->lli_xattrs_list_rwsem);
        mutex_unlock(&lli->lli_xattrs_enq_lock);
@@ -362,10 +362,10 @@ static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
                goto out_maybe_drop;
        }
 
-       if (oit->d.lustre.it_status < 0) {
+       if (oit->it_status < 0) {
                CDEBUG(D_CACHE, "getxattr intent returned %d for fid "DFID"\n",
-                      oit->d.lustre.it_status, PFID(ll_inode2fid(inode)));
-               rc = oit->d.lustre.it_status;
+                      oit->it_status, PFID(ll_inode2fid(inode)));
+               rc = oit->it_status;
                /* xattr data is so large that we don't want to cache it */
                if (rc == -ERANGE)
                        rc = -EAGAIN;
@@ -448,8 +448,8 @@ out_destroy:
        up_write(&lli->lli_xattrs_list_rwsem);
 
        ldlm_lock_decref_and_cancel((struct lustre_handle *)
-                                       &oit->d.lustre.it_lock_handle,
-                                       oit->d.lustre.it_lock_mode);
+                                       &oit->it_lock_handle,
+                                       oit->it_lock_mode);
 
        goto out_no_unlock;
 }
index 378691b2a06238adb059a977ef3071a4c5269108..a3d170aa6fd274bbcd6e4fd995e938aef85928ac 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index e0958eaed0547528b0ba016a63623caed0d25f39..2f58fdab8d1e33845ef55ea3d0485a7a608a1735 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -84,11 +80,11 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
        /*
         * We got LOOKUP lock, but we really need attrs.
         */
-       pmode = it->d.lustre.it_lock_mode;
+       pmode = it->it_lock_mode;
        if (pmode) {
-               plock.cookie = it->d.lustre.it_lock_handle;
-               it->d.lustre.it_lock_mode = 0;
-               it->d.lustre.it_data = NULL;
+               plock.cookie = it->it_lock_handle;
+               it->it_lock_mode = 0;
+               it->it_request = NULL;
        }
 
        LASSERT(fid_is_sane(&body->fid1));
@@ -134,14 +130,14 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
         * maintain dcache consistency. Thus drop UPDATE|PERM lock here
         * and put LOOKUP in request.
         */
-       if (it->d.lustre.it_lock_mode != 0) {
-               it->d.lustre.it_remote_lock_handle =
-                                       it->d.lustre.it_lock_handle;
-               it->d.lustre.it_remote_lock_mode = it->d.lustre.it_lock_mode;
+       if (it->it_lock_mode != 0) {
+               it->it_remote_lock_handle =
+                                       it->it_lock_handle;
+               it->it_remote_lock_mode = it->it_lock_mode;
        }
 
-       it->d.lustre.it_lock_handle = plock.cookie;
-       it->d.lustre.it_lock_mode = pmode;
+       it->it_lock_handle = plock.cookie;
+       it->it_lock_mode = pmode;
 
 out_free_op_data:
        kfree(op_data);
@@ -201,9 +197,9 @@ static int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
         * Nothing is found, do not access body->fid1 as it is zero and thus
         * pointless.
         */
-       if ((it->d.lustre.it_disposition & DISP_LOOKUP_NEG) &&
-           !(it->d.lustre.it_disposition & DISP_OPEN_CREATE) &&
-           !(it->d.lustre.it_disposition & DISP_OPEN_OPEN))
+       if ((it->it_disposition & DISP_LOOKUP_NEG) &&
+           !(it->it_disposition & DISP_OPEN_CREATE) &&
+           !(it->it_disposition & DISP_OPEN_OPEN))
                return rc;
 
        body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
index 7007e4c4803524ea5ee01bb12344c07def5923bb..0beafc49b8d2c9a3a8d6777f1dc3caf1f6f969b7 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 9e31f6b03f9e5a0f470f2ff64a09c58bf138e67e..0e1588a43187ac8207f1d7fd3b8ab53e15158ab7 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -1683,7 +1679,7 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
                   struct lustre_handle *lockh, void *lmm, int lmmsize,
                   __u64 extra_lock_flags)
 {
-       struct ptlrpc_request      *req = it->d.lustre.it_data;
+       struct ptlrpc_request      *req = it->it_request;
        struct obd_device         *obd = exp->exp_obd;
        struct lmv_obd       *lmv = &obd->u.lmv;
        struct lustre_handle    plock;
@@ -1705,11 +1701,11 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
        /*
         * We got LOOKUP lock, but we really need attrs.
         */
-       pmode = it->d.lustre.it_lock_mode;
+       pmode = it->it_lock_mode;
        LASSERT(pmode != 0);
        memcpy(&plock, lockh, sizeof(plock));
-       it->d.lustre.it_lock_mode = 0;
-       it->d.lustre.it_data = NULL;
+       it->it_lock_mode = 0;
+       it->it_request = NULL;
        fid1 = body->fid1;
 
        ptlrpc_req_finished(req);
@@ -2611,27 +2607,6 @@ static int lmv_clear_open_replay_data(struct obd_export *exp,
        return md_clear_open_replay_data(tgt->ltd_exp, och);
 }
 
-static int lmv_get_remote_perm(struct obd_export *exp,
-                              const struct lu_fid *fid,
-                              __u32 suppgid, struct ptlrpc_request **request)
-{
-       struct obd_device       *obd = exp->exp_obd;
-       struct lmv_obd    *lmv = &obd->u.lmv;
-       struct lmv_tgt_desc     *tgt;
-       int                   rc;
-
-       rc = lmv_check_connect(obd);
-       if (rc)
-               return rc;
-
-       tgt = lmv_find_target(lmv, fid);
-       if (IS_ERR(tgt))
-               return PTR_ERR(tgt);
-
-       rc = md_get_remote_perm(tgt->ltd_exp, fid, suppgid, request);
-       return rc;
-}
-
 static int lmv_intent_getattr_async(struct obd_export *exp,
                                    struct md_enqueue_info *minfo,
                                    struct ldlm_enqueue_info *einfo)
@@ -2686,7 +2661,7 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
        struct lmv_obd      *lmv = &obd->u.lmv;
        struct lmv_tgt_desc *tgt = lmv->tgts[0];
        int               rc = 0, i;
-       __u64           curspace, curinodes;
+       __u64 curspace = 0, curinodes = 0;
 
        if (!tgt || !tgt->ltd_exp || !tgt->ltd_active ||
            !lmv->desc.ld_tgt_count) {
@@ -2699,7 +2674,6 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
                return rc;
        }
 
-       curspace = curinodes = 0;
        for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
                int err;
 
@@ -2796,7 +2770,6 @@ static struct md_ops lmv_md_ops = {
        .free_lustre_md         = lmv_free_lustre_md,
        .set_open_replay_data   = lmv_set_open_replay_data,
        .clear_open_replay_data = lmv_clear_open_replay_data,
-       .get_remote_perm        = lmv_get_remote_perm,
        .intent_getattr_async   = lmv_intent_getattr_async,
        .revalidate_lock        = lmv_revalidate_lock
 };
index b39e364a29ab038d36757f0e64f5754c6c75f218..c29c361eb0cc3da702f26987bcc138d5ad0c8ea2 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index ac9744e887ae09b3c2f6455a14bc0d2e3ebf0975..9740568d9521dc97b370e3f218ca461f61092a64 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index dae8e89bcf6dd8a687fab413dbc1a4900dd1329b..b1f260d43bc7587619a1efdcac81abc0f2859a12 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 460f0fa5e6b10bbbd6a4cd77cf59b51d0f9da445..5053dead17bb5da8d0f81d00259f90c248bbd75b 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index eef9afac846713734b6bd0c4d893b3a35d758839..12bd511e8988fa9ec0c94f876b902ff0d3fc0345 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 86cb3f8f924688c6bb5331a65bc39272397e9805..84032a51025423aa78ee2786e307b2e3c183ea49 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 1b203d18c6e939d0bfe5f275c809240c7ccc1ae2..f3a0583f28f58eb6336840e229a686b8ea635ef1 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 56ef41d17ad7f148d8e418425bab042e9a42b7ba..b9c90865fdfcd1829aeb67130300addd9ffd1c16 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index e15ef2ece893c68e32fef50b898f3bf2f4163efe..9b92d5522edb6b5e32af0e8b91810942f309f840 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -896,6 +892,12 @@ static int lov_cleanup(struct obd_device *obd)
                kfree(lov->lov_tgts);
                lov->lov_tgt_size = 0;
        }
+
+       if (lov->lov_cache) {
+               cl_cache_decref(lov->lov_cache);
+               lov->lov_cache = NULL;
+       }
+
        return 0;
 }
 
@@ -1772,7 +1774,8 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
        fm_start = fiemap->fm_start;
        fm_length = fiemap->fm_length;
        /* Calculate start stripe, last stripe and length of mapping */
-       actual_start_stripe = start_stripe = lov_stripe_number(lsm, fm_start);
+       start_stripe = lov_stripe_number(lsm, fm_start);
+       actual_start_stripe = start_stripe;
        fm_end = (fm_length == ~0ULL ? fm_key->oa.o_size :
                                                fm_start + fm_length - 1);
        /* If fm_length != ~0ULL but fm_start+fm_length-1 exceeds file size */
@@ -2095,11 +2098,9 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
        u32 count;
        int i, rc = 0, err;
        struct lov_tgt_desc *tgt;
-       unsigned incr, check_uuid,
-                do_inactive, no_set;
-       unsigned next_id = 0,  mds_con = 0;
+       unsigned int incr = 0, check_uuid = 0, do_inactive = 0, no_set = 0;
+       unsigned int next_id = 0, mds_con = 0;
 
-       incr = check_uuid = do_inactive = no_set = 0;
        if (!set) {
                no_set = 1;
                set = ptlrpc_prep_set();
@@ -2126,6 +2127,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
                LASSERT(!lov->lov_cache);
                lov->lov_cache = val;
                do_inactive = 1;
+               cl_cache_incref(lov->lov_cache);
        }
 
        for (i = 0; i < count; i++, val = (char *)val + incr) {
index 561d493b2cdfa3d6047f91523fcb75b52361fcf6..f9621b0fd469ea2d9a13642eada7618d406d33f2 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -185,8 +181,8 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
                }
 
                LU_OBJECT_DEBUG(mask, env, &stripe->co_lu,
-                               "stripe %d is already owned.\n", idx);
-               LU_OBJECT_DEBUG(mask, env, old_obj, "owned.\n");
+                               "stripe %d is already owned.", idx);
+               LU_OBJECT_DEBUG(mask, env, old_obj, "owned.");
                LU_OBJECT_HEADER(mask, env, lov2lu(lov), "try to own.\n");
                cl_object_put(env, stripe);
        }
index 9302f06c34ef57154039e651678cf057f2aa4a95..ecca74fbff001f4f228e5240cee88ad3ad679412 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -74,7 +70,7 @@ pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
 {
        loff_t offset;
 
-       offset = lov_stripe_size(lsm, stripe_index << PAGE_SHIFT, stripe);
+       offset = lov_stripe_size(lsm, (stripe_index << PAGE_SHIFT) + 1, stripe);
        return offset >> PAGE_SHIFT;
 }
 
index 0215ea54df8db83b8a934d3932fe8e9d61c36dfc..869ef41b13ca25aac16b28346345d86b32daa71c 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 0306f00c3f3347d22ec8b3c6cbc2df957e48770a..c17026f14896ba5438951ab2429958123b978b67 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 690292ecebdc0f52d1a2d8e0b6805cd000fd9eaf..4c2d21729589c323263d892530d5b93948b4c179 100644 (file)
  * in the LICENSE file that accompanied this code).
  *
  * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see [sun.com URL with a
- * copy of GPLv2].
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 1be4b921c01fde2918e68bbdf2df660c8e30b6aa..4099b51f826ed6e6e3974cda66a82201b4d00db7 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 35f6b1d66ff4af01f89dc25c2b5fa80e10f2a76b..b519a1940e1e691ec86712ffa4c5c128723431b7 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 783ec687a4e7374190467f03b3249a14fd21b47f..6a9820218a3ee7fe008457e8e309f473b0dbb87b 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index e92edfb618b753f1f9c12148385b66cd42f7fc5f..38f9b735c2418f24cabcd8a437ddb158cad5d968 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index bcaae1e5b840cd96321f6395d3fda7cf1b82b709..fb2f2660b3e90d9b06d3c2501e804cf62afddafb 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 9badedcce2bfe20352481922e08d7ac55e585a17..b2e68c3e820d5499fc67eba38b9c71a28b196e3c 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 0dcb6b6a778235d835c86775ca348595731b9b12..eb6d30d34e3a7f71adc6ac15816dd08728d84fe0 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 5c7a15dd7bd2d1ef08fc930c411d3e358158fe07..98d15fb247bc48748d3391aa2b0da8dba696a1a2 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index c5519aeb0d8a592ecded53b9fe32d4b610668118..58f2841cabe4c5ff75ba15d53c727056927a7ea5 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 856c54e03b6b32b491a55f783bac86d2cf323c6b..143bd7628572f87a0a6c62589bdd1188b1a7fef3 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -471,6 +467,18 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
        rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
 
        mdc_setattr_pack_rec(rec, op_data);
+       /*
+        * The client will zero out local timestamps when losing the IBITS lock
+        * so any new RPC timestamps will update the client inode's timestamps.
+        * There was a defect on the server side which allowed the atime to be
+        * overwritten by a zeroed-out atime packed into the close RPC.
+        *
+        * Proactively clear the MDS_ATTR_ATIME flag in the RPC in this case
+        * to avoid zeroing the atime on old unpatched servers.  See LU-8041.
+        */
+       if (rec->sa_atime == 0)
+               rec->sa_valid &= ~MDS_ATTR_ATIME;
+
        mdc_ioepoch_pack(epoch, op_data);
        mdc_hsm_release_pack(req, op_data);
 }
index 3b1bc9111b93f2ce8aad314a52c828b5d09e23c5..f48b584233071bc17d282ab9bc5d6afeb8bc7087 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -54,61 +50,43 @@ struct mdc_getattr_args {
        struct ldlm_enqueue_info    *ga_einfo;
 };
 
-int it_disposition(struct lookup_intent *it, int flag)
-{
-       return it->d.lustre.it_disposition & flag;
-}
-EXPORT_SYMBOL(it_disposition);
-
-void it_set_disposition(struct lookup_intent *it, int flag)
-{
-       it->d.lustre.it_disposition |= flag;
-}
-EXPORT_SYMBOL(it_set_disposition);
-
-void it_clear_disposition(struct lookup_intent *it, int flag)
-{
-       it->d.lustre.it_disposition &= ~flag;
-}
-EXPORT_SYMBOL(it_clear_disposition);
-
 int it_open_error(int phase, struct lookup_intent *it)
 {
        if (it_disposition(it, DISP_OPEN_LEASE)) {
                if (phase >= DISP_OPEN_LEASE)
-                       return it->d.lustre.it_status;
+                       return it->it_status;
                else
                        return 0;
        }
        if (it_disposition(it, DISP_OPEN_OPEN)) {
                if (phase >= DISP_OPEN_OPEN)
-                       return it->d.lustre.it_status;
+                       return it->it_status;
                else
                        return 0;
        }
 
        if (it_disposition(it, DISP_OPEN_CREATE)) {
                if (phase >= DISP_OPEN_CREATE)
-                       return it->d.lustre.it_status;
+                       return it->it_status;
                else
                        return 0;
        }
 
        if (it_disposition(it, DISP_LOOKUP_EXECD)) {
                if (phase >= DISP_LOOKUP_EXECD)
-                       return it->d.lustre.it_status;
+                       return it->it_status;
                else
                        return 0;
        }
 
        if (it_disposition(it, DISP_IT_EXECD)) {
                if (phase >= DISP_IT_EXECD)
-                       return it->d.lustre.it_status;
+                       return it->it_status;
                else
                        return 0;
        }
-       CERROR("it disp: %X, status: %d\n", it->d.lustre.it_disposition,
-              it->d.lustre.it_status);
+       CERROR("it disp: %X, status: %d\n", it->it_disposition,
+              it->it_status);
        LBUG();
        return 0;
 }
@@ -347,10 +325,6 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
        mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm,
                      lmmsize);
 
-       /* for remote client, fetch remote perm for current user */
-       if (client_is_remote(exp))
-               req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
-                                    sizeof(struct mdt_remote_perm));
        ptlrpc_request_set_replen(req);
        return req;
 }
@@ -444,9 +418,7 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
        struct obd_device     *obddev = class_exp2obd(exp);
        u64                    valid = OBD_MD_FLGETATTR | OBD_MD_FLEASIZE |
                                       OBD_MD_FLMODEASIZE | OBD_MD_FLDIREA |
-                                      OBD_MD_MEA |
-                                      (client_is_remote(exp) ?
-                                              OBD_MD_FLRMTPERM : OBD_MD_FLACL);
+                                      OBD_MD_MEA | OBD_MD_FLACL;
        struct ldlm_intent    *lit;
        int                 rc;
        int                 easize;
@@ -478,9 +450,6 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
        mdc_getattr_pack(req, valid, it->it_flags, op_data, easize);
 
        req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, easize);
-       if (client_is_remote(exp))
-               req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
-                                    sizeof(struct mdt_remote_perm));
        ptlrpc_request_set_replen(req);
        return req;
 }
@@ -555,7 +524,6 @@ static int mdc_finish_enqueue(struct obd_export *exp,
        struct req_capsule  *pill = &req->rq_pill;
        struct ldlm_request *lockreq;
        struct ldlm_reply   *lockrep;
-       struct lustre_intent_data *intent = &it->d.lustre;
        struct ldlm_lock    *lock;
        void            *lvb_data = NULL;
        int               lvb_len = 0;
@@ -589,17 +557,17 @@ static int mdc_finish_enqueue(struct obd_export *exp,
 
        lockrep = req_capsule_server_get(pill, &RMF_DLM_REP);
 
-       intent->it_disposition = (int)lockrep->lock_policy_res1;
-       intent->it_status = (int)lockrep->lock_policy_res2;
-       intent->it_lock_mode = einfo->ei_mode;
-       intent->it_lock_handle = lockh->cookie;
-       intent->it_data = req;
+       it->it_disposition = (int)lockrep->lock_policy_res1;
+       it->it_status = (int)lockrep->lock_policy_res2;
+       it->it_lock_mode = einfo->ei_mode;
+       it->it_lock_handle = lockh->cookie;
+       it->it_request = req;
 
        /* Technically speaking rq_transno must already be zero if
         * it_status is in error, so the check is a bit redundant
         */
-       if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay)
-               mdc_clear_replay_flag(req, intent->it_status);
+       if ((!req->rq_transno || it->it_status < 0) && req->rq_replay)
+               mdc_clear_replay_flag(req, it->it_status);
 
        /* If we're doing an IT_OPEN which did not result in an actual
         * successful open, then we need to remove the bit which saves
@@ -610,11 +578,11 @@ static int mdc_finish_enqueue(struct obd_export *exp,
         * (bug 3440)
         */
        if (it->it_op & IT_OPEN && req->rq_replay &&
-           (!it_disposition(it, DISP_OPEN_OPEN) || intent->it_status != 0))
-               mdc_clear_replay_flag(req, intent->it_status);
+           (!it_disposition(it, DISP_OPEN_OPEN) || it->it_status != 0))
+               mdc_clear_replay_flag(req, it->it_status);
 
        DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d",
-                 it->it_op, intent->it_disposition, intent->it_status);
+                 it->it_op, it->it_disposition, it->it_status);
 
        /* We know what to expect, so we do any byte flipping required here */
        if (it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR)) {
@@ -687,16 +655,6 @@ static int mdc_finish_enqueue(struct obd_export *exp,
                                        memcpy(lmm, eadata, body->eadatasize);
                        }
                }
-
-               if (body->valid & OBD_MD_FLRMTPERM) {
-                       struct mdt_remote_perm *perm;
-
-                       LASSERT(client_is_remote(exp));
-                       perm = req_capsule_server_swab_get(pill, &RMF_ACL,
-                                               lustre_swab_mdt_remote_perm);
-                       if (!perm)
-                               return -EPROTO;
-               }
        } else if (it->it_op & IT_LAYOUT) {
                /* maybe the lock was granted right away and layout
                 * is packed into RMF_DLM_LVB of req
@@ -715,7 +673,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
        if (lock && ldlm_has_layout(lock) && lvb_data) {
                void *lmm;
 
-               LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d\n",
+               LDLM_DEBUG(lock, "layout lock returned by: %s, lvb_len: %d",
                           ldlm_it2str(it->it_op), lvb_len);
 
                lmm = libcfs_kvzalloc(lvb_len, GFP_NOFS);
@@ -923,9 +881,9 @@ resend:
                }
                ptlrpc_req_finished(req);
 
-               it->d.lustre.it_lock_handle = 0;
-               it->d.lustre.it_lock_mode = 0;
-               it->d.lustre.it_data = NULL;
+               it->it_lock_handle = 0;
+               it->it_lock_mode = 0;
+               it->it_request = NULL;
        }
 
        return rc;
@@ -949,8 +907,8 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
                /* The server failed before it even started executing the
                 * intent, i.e. because it couldn't unpack the request.
                 */
-               LASSERT(it->d.lustre.it_status != 0);
-               return it->d.lustre.it_status;
+               LASSERT(it->it_status != 0);
+               return it->it_status;
        }
        rc = it_open_error(DISP_IT_EXECD, it);
        if (rc)
@@ -1033,15 +991,15 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
                                    LDLM_IBITS, &policy, LCK_NL,
                                    &old_lock, 0)) {
                        ldlm_lock_decref_and_cancel(lockh,
-                                                   it->d.lustre.it_lock_mode);
+                                                   it->it_lock_mode);
                        memcpy(lockh, &old_lock, sizeof(old_lock));
-                       it->d.lustre.it_lock_handle = lockh->cookie;
+                       it->it_lock_handle = lockh->cookie;
                }
        }
        CDEBUG(D_DENTRY,
               "D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
               op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
-              it->d.lustre.it_status, it->d.lustre.it_disposition, rc);
+              it->it_status, it->it_disposition, rc);
        return rc;
 }
 
@@ -1057,8 +1015,8 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
        ldlm_policy_data_t policy;
        enum ldlm_mode mode;
 
-       if (it->d.lustre.it_lock_handle) {
-               lockh.cookie = it->d.lustre.it_lock_handle;
+       if (it->it_lock_handle) {
+               lockh.cookie = it->it_lock_handle;
                mode = ldlm_revalidate_lock_handle(&lockh, bits);
        } else {
                fid_build_reg_res_name(fid, &res_id);
@@ -1099,11 +1057,11 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
        }
 
        if (mode) {
-               it->d.lustre.it_lock_handle = lockh.cookie;
-               it->d.lustre.it_lock_mode = mode;
+               it->it_lock_handle = lockh.cookie;
+               it->it_lock_mode = mode;
        } else {
-               it->d.lustre.it_lock_handle = 0;
-               it->d.lustre.it_lock_mode = 0;
+               it->it_lock_handle = 0;
+               it->it_lock_mode = 0;
        }
 
        return !!mode;
@@ -1125,15 +1083,15 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
  * ll_create/ll_open gets called.
  *
  * The server will return to us, in it_disposition, an indication of
- * exactly what d.lustre.it_status refers to.
+ * exactly what it_status refers to.
  *
- * If DISP_OPEN_OPEN is set, then d.lustre.it_status refers to the open() call,
+ * If DISP_OPEN_OPEN is set, then it_status refers to the open() call,
  * otherwise if DISP_OPEN_CREATE is set, then it status is the
  * creation failure mode.  In either case, one of DISP_LOOKUP_NEG or
  * DISP_LOOKUP_POS will be set, indicating whether the child lookup
  * was successful.
  *
- * Else, if DISP_LOOKUP_EXECD then d.lustre.it_status is the rc of the
+ * Else, if DISP_LOOKUP_EXECD then it_status is the rc of the
  * child lookup.
  */
 int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
@@ -1166,7 +1124,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
                 * be called in revalidate_it if we already have a lock, let's
                 * verify that.
                 */
-               it->d.lustre.it_lock_handle = 0;
+               it->it_lock_handle = 0;
                rc = mdc_revalidate_lock(exp, it, &op_data->op_fid2, NULL);
                /* Only return failure if it was not GETATTR by cfid
                 * (from inode_revalidate)
@@ -1188,7 +1146,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
        if (rc < 0)
                return rc;
 
-       *reqp = it->d.lustre.it_data;
+       *reqp = it->it_request;
        rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh);
        return rc;
 }
index 4ef3db147f8722d8ccff2524154895055dc5f051..5dba2c81385718838b74f9f61d457401ef5ade8e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -234,7 +230,7 @@ rebuild:
                                                MDS_INODELOCK_UPDATE);
 
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
-                                  &RQF_MDS_REINT_CREATE_RMT_ACL);
+                                  &RQF_MDS_REINT_CREATE_ACL);
        if (!req) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                return -ENOMEM;
index 86b7445365f43b2c16ace7e57b0ec57d26e2605a..d4cc73bb6e1eaea0baca4f6574fb162ecd225790 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -150,16 +146,6 @@ static int mdc_getattr_common(struct obd_export *exp,
                        return -EPROTO;
        }
 
-       if (body->valid & OBD_MD_FLRMTPERM) {
-               struct mdt_remote_perm *perm;
-
-               LASSERT(client_is_remote(exp));
-               perm = req_capsule_server_swab_get(pill, &RMF_ACL,
-                                               lustre_swab_mdt_remote_perm);
-               if (!perm)
-                       return -EPROTO;
-       }
-
        return 0;
 }
 
@@ -190,11 +176,6 @@ static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
 
        req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
                             op_data->op_mode);
-       if (op_data->op_valid & OBD_MD_FLRMTPERM) {
-               LASSERT(client_is_remote(exp));
-               req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
-                                    sizeof(struct mdt_remote_perm));
-       }
        ptlrpc_request_set_replen(req);
 
        rc = mdc_getattr_common(exp, req);
@@ -539,16 +520,7 @@ static int mdc_get_lustre_md(struct obd_export *exp,
        }
        rc = 0;
 
-       if (md->body->valid & OBD_MD_FLRMTPERM) {
-               /* remote permission */
-               LASSERT(client_is_remote(exp));
-               md->remote_perm = req_capsule_server_swab_get(pill, &RMF_ACL,
-                                               lustre_swab_mdt_remote_perm);
-               if (!md->remote_perm) {
-                       rc = -EPROTO;
-                       goto out;
-               }
-       } else if (md->body->valid & OBD_MD_FLACL) {
+       if (md->body->valid & OBD_MD_FLACL) {
                /* for ACL, it's possible that FLACL is set but aclsize is zero.
                 * only when aclsize != 0 there's an actual segment for ACL
                 * in reply buffer.
@@ -665,7 +637,7 @@ int mdc_set_open_replay_data(struct obd_export *exp,
        struct md_open_data   *mod;
        struct mdt_rec_create *rec;
        struct mdt_body       *body;
-       struct ptlrpc_request *open_req = it->d.lustre.it_data;
+       struct ptlrpc_request *open_req = it->it_request;
        struct obd_import     *imp = open_req->rq_import;
 
        if (!open_req->rq_replay)
@@ -1168,7 +1140,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
                goto out;
        }
 
-       mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+       mdc_pack_body(req, NULL, 0, 0, -1, 0);
 
        /* Copy hsm_progress struct */
        req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
@@ -1202,7 +1174,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
                goto out;
        }
 
-       mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+       mdc_pack_body(req, NULL, 0, 0, -1, 0);
 
        /* Copy hsm_progress struct */
        archive_mask = req_capsule_client_get(&req->rq_pill,
@@ -1241,7 +1213,7 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp,
                return rc;
        }
 
-       mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
+       mdc_pack_body(req, &op_data->op_fid1, 0, 0,
                      op_data->op_suppgids[0], 0);
 
        ptlrpc_request_set_replen(req);
@@ -1277,7 +1249,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
                goto out;
        }
 
-       mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+       mdc_pack_body(req, NULL, 0, 0, -1, 0);
 
        ptlrpc_request_set_replen(req);
 
@@ -1306,7 +1278,7 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp,
                return rc;
        }
 
-       mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
+       mdc_pack_body(req, &op_data->op_fid1, 0, 0,
                      op_data->op_suppgids[0], 0);
 
        ptlrpc_request_set_replen(req);
@@ -1347,7 +1319,7 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
                return rc;
        }
 
-       mdc_pack_body(req, &op_data->op_fid1, OBD_MD_FLRMTPERM, 0,
+       mdc_pack_body(req, &op_data->op_fid1, 0, 0,
                      op_data->op_suppgids[0], 0);
 
        /* Copy states */
@@ -1394,7 +1366,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
                return rc;
        }
 
-       mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
+       mdc_pack_body(req, NULL, 0, 0, -1, 0);
 
        /* Copy hsm_request struct */
        req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
@@ -1807,7 +1779,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
        case IOC_OBD_STATFS: {
                struct obd_statfs stat_buf = {0};
 
-               if (*((__u32 *) data->ioc_inlbuf2) != 0) {
+               if (*((__u32 *)data->ioc_inlbuf2) != 0) {
                        rc = -ENODEV;
                        goto out;
                }
@@ -2001,7 +1973,7 @@ static int mdc_hsm_copytool_send(int len, void *val)
 
        if (len < sizeof(*lh) + sizeof(*hal)) {
                CERROR("Short HSM message %d < %d\n", len,
-                      (int) (sizeof(*lh) + sizeof(*hal)));
+                      (int)(sizeof(*lh) + sizeof(*hal)));
                return -EPROTO;
        }
        if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
@@ -2432,41 +2404,6 @@ static int mdc_process_config(struct obd_device *obd, u32 len, void *buf)
        return rc;
 }
 
-/* get remote permission for current user on fid */
-static int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
-                              __u32 suppgid, struct ptlrpc_request **request)
-{
-       struct ptlrpc_request  *req;
-       int                 rc;
-
-       LASSERT(client_is_remote(exp));
-
-       *request = NULL;
-       req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
-       if (!req)
-               return -ENOMEM;
-
-       rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
-       if (rc) {
-               ptlrpc_request_free(req);
-               return rc;
-       }
-
-       mdc_pack_body(req, fid, OBD_MD_FLRMTPERM, 0, suppgid, 0);
-
-       req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
-                            sizeof(struct mdt_remote_perm));
-
-       ptlrpc_request_set_replen(req);
-
-       rc = ptlrpc_queue_wait(req);
-       if (rc)
-               ptlrpc_req_finished(req);
-       else
-               *request = req;
-       return rc;
-}
-
 static struct obd_ops mdc_obd_ops = {
        .owner          = THIS_MODULE,
        .setup          = mdc_setup,
@@ -2518,7 +2455,6 @@ static struct md_ops mdc_md_ops = {
        .free_lustre_md         = mdc_free_lustre_md,
        .set_open_replay_data   = mdc_set_open_replay_data,
        .clear_open_replay_data = mdc_clear_open_replay_data,
-       .get_remote_perm        = mdc_get_remote_perm,
        .intent_getattr_async   = mdc_intent_getattr_async,
        .revalidate_lock        = mdc_revalidate_lock
 };
index 8d5bc5a751a4989e8bdaf215f39080448dd739b1..0735220b2a1839a1c97d0a1f3a4e0e26f7d884e0 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 82fb8f46e037d6260c63fb75f928630f0a5bff6a..f146f7521c92d93e1342116eeb988922ad1477c1 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 2311a437c44194dc30ffbba65a154c7156f5d518..9d0bd474586580585a8819afaa6845f15d6219ac 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -500,7 +496,9 @@ static void do_requeue(struct config_llog_data *cld)
         * export which is being disconnected. Take the client
         * semaphore to make the check non-racy.
         */
-       down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
+       down_read_nested(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem,
+                        OBD_CLI_SEM_MGC);
+
        if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
                int rc;
 
@@ -1034,7 +1032,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
                rc = sptlrpc_parse_flavor(val, &flvr);
                if (rc) {
                        CERROR("invalid sptlrpc flavor %s to MGS\n",
-                              (char *) val);
+                              (char *)val);
                        return rc;
                }
 
@@ -1050,7 +1048,7 @@ static int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
                        sptlrpc_flavor2name(&cli->cl_flvr_mgc,
                                            str, sizeof(str));
                        LCONSOLE_ERROR("asking sptlrpc flavor %s to MGS but currently %s is in use\n",
-                                      (char *) val, str);
+                                      (char *)val, str);
                        rc = -EPERM;
                }
                return rc;
index c404eb3864ffd8770d4713cd82e3f26208de7348..df7e47f35a66663d517350c61118e96accbf3a83 100644 (file)
@@ -5,5 +5,4 @@ obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
              genops.o uuid.o lprocfs_status.o lprocfs_counters.o \
              lustre_handles.o lustre_peer.o statfs_pack.o \
              obdo.o obd_config.o obd_mount.o lu_object.o lu_ref.o \
-             cl_object.o cl_page.o cl_lock.o cl_io.o \
-             acl.o kernelcomm.o
+             cl_object.o cl_page.o cl_lock.o cl_io.o kernelcomm.o
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
deleted file mode 100644 (file)
index 0e02ae9..0000000
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/obdclass/acl.c
- *
- * Lustre Access Control List.
- *
- * Author: Fan Yong <fanyong@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_SEC
-#include "../include/lu_object.h"
-#include "../include/lustre_acl.h"
-#include "../include/lustre_eacl.h"
-#include "../include/obd_support.h"
-
-#ifdef CONFIG_FS_POSIX_ACL
-
-#define CFS_ACL_XATTR_VERSION POSIX_ACL_XATTR_VERSION
-
-enum {
-       ES_UNK  = 0,    /* unknown stat */
-       ES_UNC  = 1,    /* ACL entry is not changed */
-       ES_MOD  = 2,    /* ACL entry is modified */
-       ES_ADD  = 3,    /* ACL entry is added */
-       ES_DEL  = 4     /* ACL entry is deleted */
-};
-
-static inline void lustre_ext_acl_le_to_cpu(ext_acl_xattr_entry *d,
-                                           ext_acl_xattr_entry *s)
-{
-       d->e_tag        = le16_to_cpu(s->e_tag);
-       d->e_perm       = le16_to_cpu(s->e_perm);
-       d->e_id  = le32_to_cpu(s->e_id);
-       d->e_stat       = le32_to_cpu(s->e_stat);
-}
-
-static inline void lustre_ext_acl_cpu_to_le(ext_acl_xattr_entry *d,
-                                           ext_acl_xattr_entry *s)
-{
-       d->e_tag        = cpu_to_le16(s->e_tag);
-       d->e_perm       = cpu_to_le16(s->e_perm);
-       d->e_id  = cpu_to_le32(s->e_id);
-       d->e_stat       = cpu_to_le32(s->e_stat);
-}
-
-static inline void lustre_posix_acl_le_to_cpu(posix_acl_xattr_entry *d,
-                                             posix_acl_xattr_entry *s)
-{
-       d->e_tag        = le16_to_cpu(s->e_tag);
-       d->e_perm       = le16_to_cpu(s->e_perm);
-       d->e_id  = le32_to_cpu(s->e_id);
-}
-
-static inline void lustre_posix_acl_cpu_to_le(posix_acl_xattr_entry *d,
-                                             posix_acl_xattr_entry *s)
-{
-       d->e_tag        = cpu_to_le16(s->e_tag);
-       d->e_perm       = cpu_to_le16(s->e_perm);
-       d->e_id  = cpu_to_le32(s->e_id);
-}
-
-/* if "new_count == 0", then "new = {a_version, NULL}", NOT NULL. */
-static int lustre_posix_acl_xattr_reduce_space(posix_acl_xattr_header **header,
-                                              int old_count, int new_count)
-{
-       int old_size = CFS_ACL_XATTR_SIZE(old_count, posix_acl_xattr);
-       int new_size = CFS_ACL_XATTR_SIZE(new_count, posix_acl_xattr);
-       posix_acl_xattr_header *new;
-
-       if (unlikely(old_count <= new_count))
-               return old_size;
-
-       new = kmemdup(*header, new_size, GFP_NOFS);
-       if (unlikely(!new))
-               return -ENOMEM;
-
-       kfree(*header);
-       *header = new;
-       return new_size;
-}
-
-/* if "new_count == 0", then "new = {0, NULL}", NOT NULL. */
-static int lustre_ext_acl_xattr_reduce_space(ext_acl_xattr_header **header,
-                                            int old_count)
-{
-       int ext_count = le32_to_cpu((*header)->a_count);
-       int ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
-       ext_acl_xattr_header *new;
-
-       if (unlikely(old_count <= ext_count))
-               return 0;
-
-       new = kmemdup(*header, ext_size, GFP_NOFS);
-       if (unlikely(!new))
-               return -ENOMEM;
-
-       kfree(*header);
-       *header = new;
-       return 0;
-}
-
-/*
- * Generate new extended ACL based on the posix ACL.
- */
-ext_acl_xattr_header *
-lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
-{
-       int count, i, esize;
-       ext_acl_xattr_header *new;
-
-       if (unlikely(size < 0))
-               return ERR_PTR(-EINVAL);
-       else if (!size)
-               count = 0;
-       else
-               count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
-       esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
-       new = kzalloc(esize, GFP_NOFS);
-       if (unlikely(!new))
-               return ERR_PTR(-ENOMEM);
-
-       new->a_count = cpu_to_le32(count);
-       for (i = 0; i < count; i++) {
-               new->a_entries[i].e_tag  = header->a_entries[i].e_tag;
-               new->a_entries[i].e_perm = header->a_entries[i].e_perm;
-               new->a_entries[i].e_id   = header->a_entries[i].e_id;
-               new->a_entries[i].e_stat = cpu_to_le32(ES_UNK);
-       }
-
-       return new;
-}
-EXPORT_SYMBOL(lustre_posix_acl_xattr_2ext);
-
-/*
- * Filter out the "nobody" entries in the posix ACL.
- */
-int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, size_t size,
-                                 posix_acl_xattr_header **out)
-{
-       int count, i, j, rc = 0;
-       __u32 id;
-       posix_acl_xattr_header *new;
-
-       if (!size)
-               return 0;
-       if (size < sizeof(*new))
-               return -EINVAL;
-
-       new = kzalloc(size, GFP_NOFS);
-       if (unlikely(!new))
-               return -ENOMEM;
-
-       new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
-       count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
-       for (i = 0, j = 0; i < count; i++) {
-               id = le32_to_cpu(header->a_entries[i].e_id);
-               switch (le16_to_cpu(header->a_entries[i].e_tag)) {
-               case ACL_USER_OBJ:
-               case ACL_GROUP_OBJ:
-               case ACL_MASK:
-               case ACL_OTHER:
-                       if (id != ACL_UNDEFINED_ID) {
-                               rc = -EIO;
-                               goto _out;
-                       }
-
-                       memcpy(&new->a_entries[j++], &header->a_entries[i],
-                              sizeof(posix_acl_xattr_entry));
-                       break;
-               case ACL_USER:
-                       if (id != NOBODY_UID)
-                               memcpy(&new->a_entries[j++],
-                                      &header->a_entries[i],
-                                      sizeof(posix_acl_xattr_entry));
-                       break;
-               case ACL_GROUP:
-                       if (id != NOBODY_GID)
-                               memcpy(&new->a_entries[j++],
-                                      &header->a_entries[i],
-                                      sizeof(posix_acl_xattr_entry));
-                       break;
-               default:
-                       rc = -EIO;
-                       goto _out;
-               }
-       }
-
-       /* free unused space. */
-       rc = lustre_posix_acl_xattr_reduce_space(&new, count, j);
-       if (rc >= 0) {
-               size = rc;
-               *out = new;
-               rc = 0;
-       }
-
-_out:
-       if (rc) {
-               kfree(new);
-               size = rc;
-       }
-       return size;
-}
-EXPORT_SYMBOL(lustre_posix_acl_xattr_filter);
-
-/*
- * Release the extended ACL space.
- */
-void lustre_ext_acl_xattr_free(ext_acl_xattr_header *header)
-{
-       kfree(header);
-}
-EXPORT_SYMBOL(lustre_ext_acl_xattr_free);
-
-static ext_acl_xattr_entry *
-lustre_ext_acl_xattr_search(ext_acl_xattr_header *header,
-                           posix_acl_xattr_entry *entry, int *pos)
-{
-       int once, start, end, i, j, count = le32_to_cpu(header->a_count);
-
-       once = 0;
-       start = *pos;
-       end = count;
-
-again:
-       for (i = start; i < end; i++) {
-               if (header->a_entries[i].e_tag == entry->e_tag &&
-                   header->a_entries[i].e_id == entry->e_id) {
-                       j = i;
-                       if (++i >= count)
-                               i = 0;
-                       *pos = i;
-                       return &header->a_entries[j];
-               }
-       }
-
-       if (!once) {
-               once = 1;
-               start = 0;
-               end = *pos;
-               goto again;
-       }
-
-       return NULL;
-}
-
-/*
- * Merge the posix ACL and the extended ACL into new extended ACL.
- */
-ext_acl_xattr_header *
-lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
-                          ext_acl_xattr_header *ext_header)
-{
-       int ori_ext_count, posix_count, ext_count, ext_size;
-       int i, j, pos = 0, rc = 0;
-       posix_acl_xattr_entry pae;
-       ext_acl_xattr_header *new;
-       ext_acl_xattr_entry *ee, eae;
-
-       if (unlikely(size < 0))
-               return ERR_PTR(-EINVAL);
-       else if (!size)
-               posix_count = 0;
-       else
-               posix_count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
-       ori_ext_count = le32_to_cpu(ext_header->a_count);
-       ext_count = posix_count + ori_ext_count;
-       ext_size = CFS_ACL_XATTR_SIZE(ext_count, ext_acl_xattr);
-
-       new = kzalloc(ext_size, GFP_NOFS);
-       if (unlikely(!new))
-               return ERR_PTR(-ENOMEM);
-
-       for (i = 0, j = 0; i < posix_count; i++) {
-               lustre_posix_acl_le_to_cpu(&pae, &posix_header->a_entries[i]);
-               switch (pae.e_tag) {
-               case ACL_USER_OBJ:
-               case ACL_GROUP_OBJ:
-               case ACL_MASK:
-               case ACL_OTHER:
-                       if (pae.e_id != ACL_UNDEFINED_ID) {
-                               rc = -EIO;
-                               goto out;
-               }
-               case ACL_USER:
-                       /* ignore "nobody" entry. */
-                       if (pae.e_id == NOBODY_UID)
-                               break;
-
-                       new->a_entries[j].e_tag =
-                                       posix_header->a_entries[i].e_tag;
-                       new->a_entries[j].e_perm =
-                                       posix_header->a_entries[i].e_perm;
-                       new->a_entries[j].e_id =
-                                       posix_header->a_entries[i].e_id;
-                       ee = lustre_ext_acl_xattr_search(ext_header,
-                                       &posix_header->a_entries[i], &pos);
-                       if (ee) {
-                               if (posix_header->a_entries[i].e_perm !=
-                                                               ee->e_perm)
-                                       /* entry modified. */
-                                       ee->e_stat =
-                                       new->a_entries[j++].e_stat =
-                                                       cpu_to_le32(ES_MOD);
-                               else
-                                       /* entry unchanged. */
-                                       ee->e_stat =
-                                       new->a_entries[j++].e_stat =
-                                                       cpu_to_le32(ES_UNC);
-                       } else {
-                               /* new entry. */
-                               new->a_entries[j++].e_stat =
-                                                       cpu_to_le32(ES_ADD);
-                       }
-                       break;
-               case ACL_GROUP:
-                       /* ignore "nobody" entry. */
-                       if (pae.e_id == NOBODY_GID)
-                               break;
-                       new->a_entries[j].e_tag =
-                                       posix_header->a_entries[i].e_tag;
-                       new->a_entries[j].e_perm =
-                                       posix_header->a_entries[i].e_perm;
-                       new->a_entries[j].e_id =
-                                       posix_header->a_entries[i].e_id;
-                       ee = lustre_ext_acl_xattr_search(ext_header,
-                                       &posix_header->a_entries[i], &pos);
-                       if (ee) {
-                               if (posix_header->a_entries[i].e_perm !=
-                                                               ee->e_perm)
-                                       /* entry modified. */
-                                       ee->e_stat =
-                                       new->a_entries[j++].e_stat =
-                                                       cpu_to_le32(ES_MOD);
-                               else
-                                       /* entry unchanged. */
-                                       ee->e_stat =
-                                       new->a_entries[j++].e_stat =
-                                                       cpu_to_le32(ES_UNC);
-                       } else {
-                               /* new entry. */
-                               new->a_entries[j++].e_stat =
-                                                       cpu_to_le32(ES_ADD);
-                       }
-                       break;
-               default:
-                       rc = -EIO;
-                       goto out;
-               }
-       }
-
-       /* process deleted entries. */
-       for (i = 0; i < ori_ext_count; i++) {
-               lustre_ext_acl_le_to_cpu(&eae, &ext_header->a_entries[i]);
-               if (eae.e_stat == ES_UNK) {
-                       /* ignore "nobody" entry. */
-                       if ((eae.e_tag == ACL_USER && eae.e_id == NOBODY_UID) ||
-                           (eae.e_tag == ACL_GROUP && eae.e_id == NOBODY_GID))
-                               continue;
-
-                       new->a_entries[j].e_tag =
-                                               ext_header->a_entries[i].e_tag;
-                       new->a_entries[j].e_perm =
-                                               ext_header->a_entries[i].e_perm;
-                       new->a_entries[j].e_id = ext_header->a_entries[i].e_id;
-                       new->a_entries[j++].e_stat = cpu_to_le32(ES_DEL);
-               }
-       }
-
-       new->a_count = cpu_to_le32(j);
-       /* free unused space. */
-       rc = lustre_ext_acl_xattr_reduce_space(&new, ext_count);
-
-out:
-       if (rc) {
-               kfree(new);
-               new = ERR_PTR(rc);
-       }
-       return new;
-}
-EXPORT_SYMBOL(lustre_acl_xattr_merge2ext);
-
-#endif
index 7eb0ad7b36444f155b3958b79d003d6d31105c56..e866754a42d53868ee3e2c7ad1b3be0ae57ed5b7 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 583fb5f3388980a05cbd62f7f50ab1af74516d08..e72f1fc00a1365094c42e51ee2183438549b8389 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 26a576b63a7262e94d110c0ef5e9f293c6fe2fbb..9d7b5939b0fdb8de93ea4fd422bfe7b5610d8648 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 5940f30318eca6d227c5393f05dedff0c322a1cb..91a5806d0239b8bafd7c825ca763c6be05335f6d 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -577,7 +573,7 @@ static inline struct cl_env *cl_env_fetch(void)
 {
        struct cl_env *cle;
 
-       cle = cfs_hash_lookup(cl_env_hash, (void *) (long) current->pid);
+       cle = cfs_hash_lookup(cl_env_hash, (void *)(long)current->pid);
        LASSERT(ergo(cle, cle->ce_magic == &cl_env_init0));
        return cle;
 }
@@ -588,7 +584,7 @@ static inline void cl_env_attach(struct cl_env *cle)
                int rc;
 
                LASSERT(!cle->ce_owner);
-               cle->ce_owner = (void *) (long) current->pid;
+               cle->ce_owner = (void *)(long)current->pid;
                rc = cfs_hash_add_unique(cl_env_hash, cle->ce_owner,
                                         &cle->ce_node);
                LASSERT(rc == 0);
@@ -599,7 +595,7 @@ static inline void cl_env_do_detach(struct cl_env *cle)
 {
        void *cookie;
 
-       LASSERT(cle->ce_owner == (void *) (long) current->pid);
+       LASSERT(cle->ce_owner == (void *)(long)current->pid);
        cookie = cfs_hash_del(cl_env_hash, cle->ce_owner,
                              &cle->ce_node);
        LASSERT(cookie == cle);
index b754f516e5577ed51685efc8e180e9624ba34fd5..db2dc6b390736ef3c494273d6ed07c06d340d947 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -1076,3 +1072,49 @@ void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
        slice->cpl_page = page;
 }
 EXPORT_SYMBOL(cl_page_slice_add);
+
+/**
+ * Allocate and initialize cl_cache, called by ll_init_sbi().
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
+{
+       struct cl_client_cache  *cache = NULL;
+
+       cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+       if (!cache)
+               return NULL;
+
+       /* Initialize cache data */
+       atomic_set(&cache->ccc_users, 1);
+       cache->ccc_lru_max = lru_page_max;
+       atomic_set(&cache->ccc_lru_left, lru_page_max);
+       spin_lock_init(&cache->ccc_lru_lock);
+       INIT_LIST_HEAD(&cache->ccc_lru);
+
+       atomic_set(&cache->ccc_unstable_nr, 0);
+       init_waitqueue_head(&cache->ccc_unstable_waitq);
+
+       return cache;
+}
+EXPORT_SYMBOL(cl_cache_init);
+
+/**
+ * Increase cl_cache refcount
+ */
+void cl_cache_incref(struct cl_client_cache *cache)
+{
+       atomic_inc(&cache->ccc_users);
+}
+EXPORT_SYMBOL(cl_cache_incref);
+
+/**
+ * Decrease cl_cache refcount and free the cache if refcount=0.
+ * Since llite, lov and osc all hold cl_cache refcount,
+ * the free will not cause race. (LU-6173)
+ */
+void cl_cache_decref(struct cl_client_cache *cache)
+{
+       if (atomic_dec_and_test(&cache->ccc_users))
+               kfree(cache);
+}
+EXPORT_SYMBOL(cl_cache_decref);
index f48816af8be769efc7b61dc8b90acc0721d8a3e7..d9d2a1952b8bb7edb035f8e65f82d6202230d34e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index e4edfb2c0a203c479bff20e2fd98d3d3ff125d51..8acf67239fa8c9e2fd50a957c509ac813ac2504c 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index d95f11d62a32d3012e54ae560ba0f0f61d282f84..99c2da632b51431d6ca52c7b2608668fb575f35a 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 8405eccdac1953c0a4c8deab130fcf301064ebe9..a0f65c470f4daccfbcf872d95d4e524026cbe049 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 2cd4522462d9a3ca5f17daaf34b42340129d93c3..33342bfcc90e7030ccfe2c8807115bfbaaa96f8a 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index b41b65e2f021f38b501b0fcef03bb9e193c31b8a..c6cc6a7666e323cc8b5dd26bcaa3103768e321e9 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index e6bf414a44446cc8f27b87c54a244563b9102f56..8f70dd2686f9ddc8387dcd9d55b4445f73b54e3e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 79194d8cb5877b49ee0a0982f362168cd1fd53ba..1784ca0634280de5ae5ce26b4d9b32d680174204 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -123,8 +119,10 @@ static int llog_read_header(const struct lu_env *env,
                handle->lgh_last_idx = 0; /* header is record with index 0 */
                llh->llh_count = 1;      /* for the header record */
                llh->llh_hdr.lrh_type = LLOG_HDR_MAGIC;
-               llh->llh_hdr.lrh_len = llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
-               llh->llh_hdr.lrh_index = llh->llh_tail.lrt_index = 0;
+               llh->llh_hdr.lrh_len = LLOG_CHUNK_SIZE;
+               llh->llh_tail.lrt_len = LLOG_CHUNK_SIZE;
+               llh->llh_hdr.lrh_index = 0;
+               llh->llh_tail.lrt_index = 0;
                llh->llh_timestamp = ktime_get_real_seconds();
                if (uuid)
                        memcpy(&llh->llh_tgtuuid, uuid,
index c27d4ec1df9e53caa43176541c7c019d5ed29231..a82a2950295a8972df5ecfd082b6102b10a64611 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 7fb48dda355e996cb1ff017a6eb7ec5e4a3e664f..f7949525d9524f332113ffd06ac9d45219a10df7 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 826623f528da19e0bf2cc5fb742411b4be33a87d..6ace7e0978593ebc1e2282ba964642a403417c75 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 967ba2e1bfcb60d36aa410d91c690b1993491045..f7b9b190350ce9069b9bfb4fdddb09c01e5909fd 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 5a1eae1de2ec2ee1d7f503dce49a735966ec740e..279b625f1afe468d2396d2aa1f04c0d16a1487d0 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index e04385760f21cebc5093e984540e3d75c6ba1c02..9b03059f34d63f585232ed4e55e4ab1964b0ed82 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 993697b660f6a68be7f55c63f5746691ae101a8a..e9f6040d19eb124c66e8957ad2b5266ea96ba3ae 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 403ceea06186aa1f3fb9440aa0f42378d381474e..082f530c527c31f4aa5b9023f976f9d16579a3f4 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index b1abe023bb3563f9d48f1dcce2c7c1104fbbb23e..5974a9bf77c0b24e0545824559ac2db1a272c7cb 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index cb1d65c3d95d92be145a38418dd50593bdff9bfd..0eab1236501bd4d77b7bf870d483c3525edd928b 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -1021,8 +1017,8 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
                /* Search proc entries */
                while (lvars[j].name) {
                        var = &lvars[j];
-                       if (!class_match_param(key, var->name, NULL)
-                           && keylen == strlen(var->name)) {
+                       if (!class_match_param(key, var->name, NULL) &&
+                           keylen == strlen(var->name)) {
                                matched++;
                                rc = -EROFS;
                                if (var->fops && var->fops->write) {
@@ -1077,7 +1073,7 @@ int class_config_llog_handler(const struct lu_env *env,
 {
        struct config_llog_instance *clli = data;
        int cfg_len = rec->lrh_len;
-       char *cfg_buf = (char *) (rec + 1);
+       char *cfg_buf = (char *)(rec + 1);
        int rc = 0;
 
        switch (rec->lrh_type) {
index e0c90adc72a7d11c09cea68a75564c5b82a3b50e..aa84a50e99041eda799dfa209da575ecbcd7fe8e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -192,7 +188,7 @@ static int lustre_start_simple(char *obdname, char *type, char *uuid,
        return rc;
 }
 
-DEFINE_MUTEX(mgc_start_lock);
+static DEFINE_MUTEX(mgc_start_lock);
 
 /** Set up a mgc obd to process startup logs
  *
index 748e33f017d52e1c814717785ba563f2b502e487..8583a4a8c206ef4d3f4e04d9b3b537ec83c6cd96 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index fb4e3ae845e0eeb5794ae0eec6e658aff0919693..4bad1fa27d4005a165d200b87618ef070fc58808 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index b0b0157a6334c152ddcafc8a0b46cd9f8de189f4..abd9b1ae72cdffce61e30a005155be3633b1fde8 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 91ef06f17934d7804ada283537c55083fc4d330c..5b29c4a44fe5080f25ed6018ce51484152b3b33e 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 33a113213bf5fab34008db3ebed91b93024c3258..7e83d395b9986a2688de6b8b6aa417075a06c2b3 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 5a14bea961b4dcaa7641c2ad6201613edbf9f024..d1a7d6beee60f09187dfab9c021a79b139858c0b 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -127,9 +123,9 @@ static const char *oes_strings[] = {
                /* ----- part 4 ----- */                                      \
                ## __VA_ARGS__);                                              \
        if (lvl == D_ERROR && __ext->oe_dlmlock)                              \
-               LDLM_ERROR(__ext->oe_dlmlock, "extent: %p\n", __ext);         \
+               LDLM_ERROR(__ext->oe_dlmlock, "extent: %p", __ext);           \
        else                                                                  \
-               LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p\n", __ext);         \
+               LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p", __ext);           \
 } while (0)
 
 #undef EASSERTF
@@ -2371,7 +2367,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
        oap->oap_obj_off = offset;
        LASSERT(!(offset & ~PAGE_MASK));
 
-       if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
+       if (capable(CFS_CAP_SYS_RESOURCE))
                oap->oap_brw_flags = OBD_BRW_NOQUOTA;
 
        INIT_LIST_HEAD(&oap->oap_pending_item);
@@ -2410,8 +2406,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
 
        /* Set the OBD_BRW_SRVLOCK before the page is queued. */
        brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
-       if (!client_is_remote(osc_export(osc)) &&
-           capable(CFS_CAP_SYS_RESOURCE)) {
+       if (capable(CFS_CAP_SYS_RESOURCE)) {
                brw_flags |= OBD_BRW_NOQUOTA;
                cmd |= OBD_BRW_NOQUOTA;
        }
@@ -2773,7 +2768,8 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
        ext->oe_sync = 1;
        ext->oe_urgent = 1;
        ext->oe_start = start;
-       ext->oe_end = ext->oe_max_end = end;
+       ext->oe_end = end;
+       ext->oe_max_end = end;
        ext->oe_obj = obj;
        ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
        ext->oe_nr_pages = page_count;
@@ -3308,7 +3304,8 @@ int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
                goto out;
 
        cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
-       info->oti_fn_index = info->oti_next_index = start;
+       info->oti_fn_index = start;
+       info->oti_next_index = start;
        do {
                res = osc_page_gang_lookup(env, io, osc,
                                           info->oti_next_index, end, cb, osc);
index ae19d396b53770a06de71d8f0c38653aa4dc115f..c8c3f1ca77be51ba93eebd394cbd2717a9062ede 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -66,7 +62,7 @@ struct osc_io {
        /** super class */
        struct cl_io_slice oi_cl;
        /** true if this io is lockless. */
-       int             oi_lockless;
+       unsigned int            oi_lockless;
        /** how many LRU pages are reserved for this IO */
        int oi_lru_reserved;
 
@@ -355,11 +351,6 @@ struct osc_page {
         * Boolean, true iff page is under transfer. Used for sanity checking.
         */
        unsigned              ops_transfer_pinned:1,
-       /**
-        * True for a `temporary page' created by read-ahead code, probably
-        * outside of any DLM lock.
-        */
-                             ops_temp:1,
        /**
         * in LRU?
         */
index d4fe507f165f463a065ce3f808aa12641f34f242..83d30c135ba4bcaf5c0b14aff3e28b836fb0c3da 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 7fad8278150f57923a938f0b2f862f0a3a6267cf..7a27f0961955c3bea63cad737622cbab589d6afa 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index d534b0e0edf69a6c69885b124539def70670e0d4..6e3dcd38913feb0200fac3bc6d28aef4f976d565 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -221,7 +217,8 @@ static void osc_page_touch_at(const struct lu_env *env,
               kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
               loi->loi_lvb.lvb_size);
 
-       attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
+       attr->cat_ctime = LTIME_S(CURRENT_TIME);
+       attr->cat_mtime = attr->cat_ctime;
        valid = CAT_MTIME | CAT_CTIME;
        if (kms > loi->loi_kms) {
                attr->cat_kms = kms;
@@ -458,7 +455,8 @@ static int osc_io_setattr_start(const struct lu_env *env,
                        unsigned int cl_valid = 0;
 
                        if (ia_valid & ATTR_SIZE) {
-                               attr->cat_size = attr->cat_kms = size;
+                               attr->cat_size = size;
+                               attr->cat_kms = size;
                                cl_valid = CAT_SIZE | CAT_KMS;
                        }
                        if (ia_valid & ATTR_MTIME_SET) {
@@ -526,7 +524,8 @@ static void osc_io_setattr_end(const struct lu_env *env,
 
        if (cbargs->opc_rpc_sent) {
                wait_for_completion(&cbargs->opc_sync);
-               result = io->ci_result = cbargs->opc_rc;
+               result = cbargs->opc_rc;
+               io->ci_result = cbargs->opc_rc;
        }
        if (result == 0) {
                if (oio->oi_lockless) {
@@ -575,7 +574,8 @@ static int osc_io_write_start(const struct lu_env *env,
 
        OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
        cl_object_attr_lock(obj);
-       attr->cat_mtime = attr->cat_ctime = ktime_get_real_seconds();
+       attr->cat_ctime = ktime_get_real_seconds();
+       attr->cat_mtime = attr->cat_ctime;
        rc = cl_object_attr_set(env, obj, attr, CAT_MTIME | CAT_CTIME);
        cl_object_attr_unlock(obj);
 
index 16f9cd9d3b12bfbe7aba3a4e691fb2371f06c7a7..717d3ffb6789bb33bc05adf507ddb29b80a60995 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -638,11 +634,10 @@ static int weigh_cb(const struct lu_env *env, struct cl_io *io,
 
        if (cl_page_is_vmlocked(env, page) ||
            PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
-          ) {
-               (*(unsigned long *)cbdata)++;
+          )
                return CLP_GANG_ABORT;
-       }
 
+       *(pgoff_t *)cbdata = osc_index(ops) + 1;
        return CLP_GANG_OKAY;
 }
 
@@ -652,7 +647,7 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
 {
        struct cl_io *io = &osc_env_info(env)->oti_io;
        struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
-       unsigned long npages = 0;
+       pgoff_t page_index;
        int result;
 
        io->ci_obj = obj;
@@ -661,11 +656,12 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
        if (result != 0)
                return result;
 
+       page_index = cl_index(obj, extent->start);
        do {
                result = osc_page_gang_lookup(env, io, oscobj,
-                                             cl_index(obj, extent->start),
+                                             page_index,
                                              cl_index(obj, extent->end),
-                                             weigh_cb, (void *)&npages);
+                                             weigh_cb, (void *)&page_index);
                if (result == CLP_GANG_ABORT)
                        break;
                if (result == CLP_GANG_RESCHED)
@@ -673,7 +669,7 @@ static unsigned long osc_lock_weight(const struct lu_env *env,
        } while (result != CLP_GANG_OKAY);
        cl_io_fini(env, io);
 
-       return npages;
+       return result == CLP_GANG_ABORT ? 1 : 0;
 }
 
 /**
@@ -703,7 +699,7 @@ unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
 
        LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
        obj = dlmlock->l_ast_data;
-       if (obj) {
+       if (!obj) {
                weight = 1;
                goto out;
        }
@@ -1120,7 +1116,8 @@ static void osc_lock_set_writer(const struct lu_env *env,
                }
        } else {
                LASSERT(cl_io_is_mkwrite(io));
-               io_start = io_end = io->u.ci_fault.ft_index;
+               io_start = io->u.ci_fault.ft_index;
+               io_end = io->u.ci_fault.ft_index;
        }
 
        if (descr->cld_mode >= CLM_WRITE &&
@@ -1171,7 +1168,7 @@ int osc_lock_init(const struct lu_env *env,
                osc_lock_set_writer(env, io, obj, oscl);
 
 
-       LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
+       LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx",
                          lock, oscl, oscl->ols_flags);
 
        return 0;
index 738ab10ab27424e8d25b96e1d14e9c93f9520fd7..d211d1905e831f2bb1661dde83661fb3c753f6f4 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index c29c2eabe39c2fcd7d091d6ea02f1cfdebf20230..355f496a2093a5b957c12147ca2bd7695ec1dd4b 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -52,13 +48,6 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
  *  @{
  */
 
-static int osc_page_protected(const struct lu_env *env,
-                             const struct osc_page *opg,
-                             enum cl_lock_mode mode, int unref)
-{
-       return 1;
-}
-
 /*****************************************************************************
  *
  * Page operations.
@@ -110,8 +99,6 @@ int osc_page_cache_add(const struct lu_env *env,
        struct osc_page *opg = cl2osc_page(slice);
        int result;
 
-       LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
-
        osc_page_transfer_get(opg, "transfer\0cache");
        result = osc_queue_async_io(env, io, opg);
        if (result != 0)
@@ -214,8 +201,6 @@ static void osc_page_delete(const struct lu_env *env,
        struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
        int rc;
 
-       LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
-
        CDEBUG(D_TRACE, "%p\n", opg);
        osc_page_transfer_put(env, opg);
        rc = osc_teardown_async_page(env, obj, opg);
@@ -254,8 +239,6 @@ static void osc_page_clip(const struct lu_env *env,
        struct osc_page *opg = cl2osc_page(slice);
        struct osc_async_page *oap = &opg->ops_oap;
 
-       LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
-
        opg->ops_from = from;
        opg->ops_to = to;
        spin_lock(&oap->oap_lock);
@@ -269,8 +252,6 @@ static int osc_page_cancel(const struct lu_env *env,
        struct osc_page *opg = cl2osc_page(slice);
        int rc = 0;
 
-       LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
-
        /* Check if the transferring against this page
         * is completed, or not even queued.
         */
@@ -320,10 +301,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
                cl_page_slice_add(page, &opg->ops_cl, obj, index,
                                  &osc_page_ops);
        }
-       /*
-        * Cannot assert osc_page_protected() here as read-ahead
-        * creates temporary pages outside of a lock.
-        */
        /* ops_inflight and ops_lru are the same field, but it doesn't
         * hurt to initialize it twice :-)
         */
@@ -380,10 +357,6 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
                     enum cl_req_type crt, int brw_flags)
 {
        struct osc_async_page *oap = &opg->ops_oap;
-       struct osc_object *obj = oap->oap_obj;
-
-       LINVRNT(osc_page_protected(env, opg,
-                                  crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
 
        LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, magic 0x%x\n",
                 oap, oap->oap_magic);
@@ -398,8 +371,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
        if (osc_over_unstable_soft_limit(oap->oap_cli))
                oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
 
-       if (!client_is_remote(osc_export(obj)) &&
-           capable(CFS_CAP_SYS_RESOURCE)) {
+       if (capable(CFS_CAP_SYS_RESOURCE)) {
                oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
                oap->oap_cmd |= OBD_BRW_NOQUOTA;
        }
@@ -440,7 +412,7 @@ static int osc_cache_too_much(struct client_obd *cli)
        int pages = atomic_read(&cli->cl_lru_in_list);
        unsigned long budget;
 
-       budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
+       budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
 
        /* if it's going to run out LRU slots, we should free some, but not
         * too much to maintain fairness among OSCs.
@@ -740,7 +712,7 @@ int osc_lru_reclaim(struct client_obd *cli)
        cache->ccc_lru_shrinkers++;
        list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
 
-       max_scans = atomic_read(&cache->ccc_users);
+       max_scans = atomic_read(&cache->ccc_users) - 2;
        while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
                cli = list_entry(cache->ccc_lru.next, struct client_obd,
                                 cl_lru_osc);
index 47417f88fe3c209f80cdc58cdc90f3fedd237f36..536b868ff776b8ad38f93c23f628e4e875991785 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -474,7 +470,8 @@ static int osc_real_create(struct obd_export *exp, struct obdo *oa,
                DEBUG_REQ(D_HA, req,
                          "delorphan from OST integration");
                /* Don't resend the delorphan req */
-               req->rq_no_resend = req->rq_no_delay = 1;
+               req->rq_no_resend = 1;
+               req->rq_no_delay = 1;
        }
 
        rc = ptlrpc_queue_wait(req);
@@ -2249,7 +2246,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
        struct lustre_handle lockh = { 0 };
        struct ptlrpc_request *req = NULL;
        int intent = *flags & LDLM_FL_HAS_INTENT;
-       __u64 match_lvb = agl ? 0 : LDLM_FL_LVB_READY;
+       __u64 match_flags = *flags;
        enum ldlm_mode mode;
        int rc;
 
@@ -2284,7 +2281,11 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
        mode = einfo->ei_mode;
        if (einfo->ei_mode == LCK_PR)
                mode |= LCK_PW;
-       mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
+       if (agl == 0)
+               match_flags |= LDLM_FL_LVB_READY;
+       if (intent != 0)
+               match_flags |= LDLM_FL_BLOCK_GRANTED;
+       mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
                               einfo->ei_type, policy, mode, &lockh, 0);
        if (mode) {
                struct ldlm_lock *matched;
@@ -2775,7 +2776,8 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
                tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
                memcpy(tmp, key, keylen);
 
-               req->rq_no_delay = req->rq_no_resend = 1;
+               req->rq_no_delay = 1;
+               req->rq_no_resend = 1;
                ptlrpc_request_set_replen(req);
                rc = ptlrpc_queue_wait(req);
                if (rc)
@@ -2915,7 +2917,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 
                LASSERT(!cli->cl_cache); /* only once */
                cli->cl_cache = val;
-               atomic_inc(&cli->cl_cache->ccc_users);
+               cl_cache_incref(cli->cl_cache);
                cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
 
                /* add this osc into entity list */
@@ -3295,7 +3297,7 @@ static int osc_cleanup(struct obd_device *obd)
                list_del_init(&cli->cl_lru_osc);
                spin_unlock(&cli->cl_cache->ccc_lru_lock);
                cli->cl_lru_left = NULL;
-               atomic_dec(&cli->cl_cache->ccc_users);
+               cl_cache_decref(cli->cl_cache);
                cli->cl_cache = NULL;
        }
 
index 4b7912a2cb529a613085ef0fbd490cac2f5a3b2b..d4463d7c81d2938cf0ed1361cc129aafc0e386c8 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -587,14 +583,19 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
        spin_unlock(&pool->prp_lock);
 }
 
-static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
-                                     __u32 version, int opcode,
-                                     int count, __u32 *lengths, char **bufs,
-                                     struct ptlrpc_cli_ctx *ctx)
+int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
+                            __u32 version, int opcode, char **bufs,
+                            struct ptlrpc_cli_ctx *ctx)
 {
-       struct obd_import *imp = request->rq_import;
+       int count;
+       struct obd_import *imp;
+       __u32 *lengths;
        int rc;
 
+       count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
+       imp = request->rq_import;
+       lengths = request->rq_pill.rc_area[RCL_CLIENT];
+
        if (unlikely(ctx)) {
                request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
        } else {
@@ -602,20 +603,16 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
                if (rc)
                        goto out_free;
        }
-
        sptlrpc_req_set_flavor(request, opcode);
 
        rc = lustre_pack_request(request, imp->imp_msg_magic, count,
                                 lengths, bufs);
-       if (rc) {
-               LASSERT(!request->rq_pool);
+       if (rc)
                goto out_ctx;
-       }
 
        lustre_msg_add_version(request->rq_reqmsg, version);
        request->rq_send_state = LUSTRE_IMP_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
-       request->rq_export = NULL;
 
        request->rq_req_cbid.cbid_fn = request_out_callback;
        request->rq_req_cbid.cbid_arg = request;
@@ -624,6 +621,8 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
        request->rq_reply_cbid.cbid_arg = request;
 
        request->rq_reply_deadline = 0;
+       request->rq_bulk_deadline = 0;
+       request->rq_req_deadline = 0;
        request->rq_phase = RQ_PHASE_NEW;
        request->rq_next_phase = RQ_PHASE_UNDEFINED;
 
@@ -632,40 +631,49 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
 
        ptlrpc_at_set_req_timeout(request);
 
-       spin_lock_init(&request->rq_lock);
-       INIT_LIST_HEAD(&request->rq_list);
-       INIT_LIST_HEAD(&request->rq_timed_list);
-       INIT_LIST_HEAD(&request->rq_replay_list);
-       INIT_LIST_HEAD(&request->rq_ctx_chain);
-       INIT_LIST_HEAD(&request->rq_set_chain);
-       INIT_LIST_HEAD(&request->rq_history_list);
-       INIT_LIST_HEAD(&request->rq_exp_list);
-       init_waitqueue_head(&request->rq_reply_waitq);
-       init_waitqueue_head(&request->rq_set_waitq);
        request->rq_xid = ptlrpc_next_xid();
-       atomic_set(&request->rq_refcount, 1);
-
        lustre_msg_set_opc(request->rq_reqmsg, opcode);
 
+       /* Let's setup deadline for req/reply/bulk unlink for opcode. */
+       if (cfs_fail_val == opcode) {
+               time_t *fail_t = NULL, *fail2_t = NULL;
+
+               if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
+                       fail_t = &request->rq_bulk_deadline;
+               } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
+                       fail_t = &request->rq_reply_deadline;
+               } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK)) {
+                       fail_t = &request->rq_req_deadline;
+               } else if (CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BOTH_UNLINK)) {
+                       fail_t = &request->rq_reply_deadline;
+                       fail2_t = &request->rq_bulk_deadline;
+               }
+
+               if (fail_t) {
+                       *fail_t = ktime_get_real_seconds() + LONG_UNLINK;
+
+                       if (fail2_t)
+                               *fail2_t = ktime_get_real_seconds() +
+                                                LONG_UNLINK;
+
+                       /* The RPC is infected, let the test change the
+                        * fail_loc
+                        */
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       schedule_timeout(cfs_time_seconds(2));
+                       set_current_state(TASK_RUNNING);
+               }
+       }
+
        return 0;
+
 out_ctx:
+       LASSERT(!request->rq_pool);
        sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
 out_free:
        class_import_put(imp);
        return rc;
 }
-
-int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
-                            __u32 version, int opcode, char **bufs,
-                            struct ptlrpc_cli_ctx *ctx)
-{
-       int count;
-
-       count = req_capsule_filled_sizes(&request->rq_pill, RCL_CLIENT);
-       return __ptlrpc_request_bufs_pack(request, version, opcode, count,
-                                         request->rq_pill.rc_area[RCL_CLIENT],
-                                         bufs, ctx);
-}
 EXPORT_SYMBOL(ptlrpc_request_bufs_pack);
 
 /**
@@ -722,7 +730,9 @@ struct ptlrpc_request *__ptlrpc_request_alloc(struct obd_import *imp,
                request = ptlrpc_prep_req_from_pool(pool);
 
        if (request) {
-               LASSERTF((unsigned long)imp > 0x1000, "%p\n", imp);
+               ptlrpc_cli_req_init(request);
+
+               LASSERTF((unsigned long)imp > 0x1000, "%p", imp);
                LASSERT(imp != LP_POISON);
                LASSERTF((unsigned long)imp->imp_client > 0x1000, "%p\n",
                         imp->imp_client);
@@ -1163,9 +1173,9 @@ static int after_reply(struct ptlrpc_request *req)
 
        LASSERT(obd);
        /* repbuf must be unlinked */
-       LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink);
+       LASSERT(!req->rq_receiving_reply && req->rq_reply_unlinked);
 
-       if (req->rq_reply_truncate) {
+       if (req->rq_reply_truncated) {
                if (ptlrpc_no_resend(req)) {
                        DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d",
                                  req->rq_nob_received, req->rq_repbuf_len);
@@ -1239,8 +1249,9 @@ static int after_reply(struct ptlrpc_request *req)
        }
 
        ktime_get_real_ts64(&work_start);
-       timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC +
-                  (work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC;
+       timediff = (work_start.tv_sec - req->rq_sent_tv.tv_sec) * USEC_PER_SEC +
+                  (work_start.tv_nsec - req->rq_sent_tv.tv_nsec) /
+                                                                NSEC_PER_USEC;
        if (obd->obd_svc_stats) {
                lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
                                    timediff);
@@ -1503,16 +1514,28 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                if (!(req->rq_phase == RQ_PHASE_RPC ||
                      req->rq_phase == RQ_PHASE_BULK ||
                      req->rq_phase == RQ_PHASE_INTERPRET ||
-                     req->rq_phase == RQ_PHASE_UNREGISTERING ||
+                     req->rq_phase == RQ_PHASE_UNREG_RPC ||
+                     req->rq_phase == RQ_PHASE_UNREG_BULK ||
                      req->rq_phase == RQ_PHASE_COMPLETE)) {
                        DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
                        LBUG();
                }
 
-               if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+               if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
+                   req->rq_phase == RQ_PHASE_UNREG_BULK) {
                        LASSERT(req->rq_next_phase != req->rq_phase);
                        LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
 
+                       if (req->rq_req_deadline &&
+                           !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REQ_UNLINK))
+                               req->rq_req_deadline = 0;
+                       if (req->rq_reply_deadline &&
+                           !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK))
+                               req->rq_reply_deadline = 0;
+                       if (req->rq_bulk_deadline &&
+                           !OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK))
+                               req->rq_bulk_deadline = 0;
+
                        /*
                         * Skip processing until reply is unlinked. We
                         * can't return to pool before that and we can't
@@ -1520,7 +1543,10 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
                         * sure that all rdma transfers finished and will
                         * not corrupt any data.
                         */
-                       if (ptlrpc_client_recv_or_unlink(req) ||
+                       if (req->rq_phase == RQ_PHASE_UNREG_RPC &&
+                           ptlrpc_client_recv_or_unlink(req))
+                               continue;
+                       if (req->rq_phase == RQ_PHASE_UNREG_BULK &&
                            ptlrpc_client_bulk_active(req))
                                continue;
 
@@ -1998,7 +2024,7 @@ void ptlrpc_interrupted_set(void *data)
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
 
                if (req->rq_phase != RQ_PHASE_RPC &&
-                   req->rq_phase != RQ_PHASE_UNREGISTERING)
+                   req->rq_phase != RQ_PHASE_UNREG_RPC)
                        continue;
 
                ptlrpc_mark_interrupted(req);
@@ -2195,11 +2221,11 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
 {
        if (!request)
                return;
+       LASSERT(!request->rq_srv_req);
+       LASSERT(!request->rq_export);
        LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
-       LASSERTF(!request->rq_rqbd, "req %p\n", request);/* client-side */
        LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
        LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
-       LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
        LASSERTF(!request->rq_replay, "req %p\n", request);
 
        req_capsule_fini(&request->rq_pill);
@@ -2225,10 +2251,7 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
 
        if (request->rq_repbuf)
                sptlrpc_cli_free_repbuf(request);
-       if (request->rq_export) {
-               class_export_put(request->rq_export);
-               request->rq_export = NULL;
-       }
+
        if (request->rq_import) {
                class_import_put(request->rq_import);
                request->rq_import = NULL;
@@ -2313,8 +2336,9 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
 
        /* Let's setup deadline for reply unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
-           async && request->rq_reply_deadline == 0)
-               request->rq_reply_deadline = ktime_get_real_seconds()+LONG_UNLINK;
+           async && request->rq_reply_deadline == 0 && cfs_fail_val == 0)
+               request->rq_reply_deadline =
+                       ktime_get_real_seconds() + LONG_UNLINK;
 
        /* Nothing left to do. */
        if (!ptlrpc_client_recv_or_unlink(request))
@@ -2327,7 +2351,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
                return 1;
 
        /* Move to "Unregistering" phase as reply was not unlinked yet. */
-       ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);
+       ptlrpc_rqphase_move(request, RQ_PHASE_UNREG_RPC);
 
        /* Do not wait for unlink to finish. */
        if (async)
@@ -2359,9 +2383,10 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
 
                LASSERT(rc == -ETIMEDOUT);
                DEBUG_REQ(D_WARNING, request,
-                         "Unexpectedly long timeout rvcng=%d unlnk=%d/%d",
+                         "Unexpectedly long timeout receiving_reply=%d req_ulinked=%d reply_unlinked=%d",
                          request->rq_receiving_reply,
-                         request->rq_req_unlink, request->rq_reply_unlink);
+                         request->rq_req_unlinked,
+                         request->rq_reply_unlinked);
        }
        return 0;
 }
@@ -2618,11 +2643,6 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req)
 }
 EXPORT_SYMBOL(ptlrpc_queue_wait);
 
-struct ptlrpc_replay_async_args {
-       int praa_old_state;
-       int praa_old_status;
-};
-
 /**
  * Callback used for replayed requests reply processing.
  * In case of successful reply calls registered request replay callback.
@@ -2961,7 +2981,6 @@ static void ptlrpcd_add_work_req(struct ptlrpc_request *req)
        req->rq_timeout         = obd_timeout;
        req->rq_sent            = ktime_get_real_seconds();
        req->rq_deadline        = req->rq_sent + req->rq_timeout;
-       req->rq_reply_deadline  = req->rq_deadline;
        req->rq_phase           = RQ_PHASE_INTERPRET;
        req->rq_next_phase      = RQ_PHASE_COMPLETE;
        req->rq_xid             = ptlrpc_next_xid();
@@ -3017,27 +3036,17 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
                return ERR_PTR(-ENOMEM);
        }
 
+       ptlrpc_cli_req_init(req);
+
        req->rq_send_state = LUSTRE_IMP_FULL;
        req->rq_type = PTL_RPC_MSG_REQUEST;
        req->rq_import = class_import_get(imp);
-       req->rq_export = NULL;
        req->rq_interpret_reply = work_interpreter;
        /* don't want reply */
-       req->rq_receiving_reply = 0;
-       req->rq_req_unlink = req->rq_reply_unlink = 0;
-       req->rq_no_delay = req->rq_no_resend = 1;
+       req->rq_no_delay = 1;
+       req->rq_no_resend = 1;
        req->rq_pill.rc_fmt = (void *)&worker_format;
 
-       spin_lock_init(&req->rq_lock);
-       INIT_LIST_HEAD(&req->rq_list);
-       INIT_LIST_HEAD(&req->rq_replay_list);
-       INIT_LIST_HEAD(&req->rq_set_chain);
-       INIT_LIST_HEAD(&req->rq_history_list);
-       INIT_LIST_HEAD(&req->rq_exp_list);
-       init_waitqueue_head(&req->rq_reply_waitq);
-       init_waitqueue_head(&req->rq_set_waitq);
-       atomic_set(&req->rq_refcount, 1);
-
        CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
        args = ptlrpc_req_async_args(req);
        args->cb = cb;
index a14daff3fca035c4975c3aab1110b09303cc5f57..177a379da9fa4809b01832a296c42086e31c2c9c 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index fdcde9bbd788e71bf2d4df18d3552ddebea6cff3..b1ce72511509e31fa2790054a005d67d241af7ec 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -55,27 +51,33 @@ void request_out_callback(lnet_event_t *ev)
 {
        struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
        struct ptlrpc_request *req = cbid->cbid_arg;
+       bool wakeup = false;
 
-       LASSERT(ev->type == LNET_EVENT_SEND ||
-               ev->type == LNET_EVENT_UNLINK);
+       LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK);
        LASSERT(ev->unlinked);
 
        DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
 
        sptlrpc_request_out_callback(req);
+
        spin_lock(&req->rq_lock);
        req->rq_real_sent = ktime_get_real_seconds();
-       if (ev->unlinked)
-               req->rq_req_unlink = 0;
+       req->rq_req_unlinked = 1;
+       /* reply_in_callback happened before request_out_callback? */
+       if (req->rq_reply_unlinked)
+               wakeup = true;
 
        if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
                /* Failed send: make it seem like the reply timed out, just
                 * like failing sends in client.c does currently...
                 */
-
                req->rq_net_err = 1;
-               ptlrpc_client_wake_req(req);
+               wakeup = true;
        }
+
+       if (wakeup)
+               ptlrpc_client_wake_req(req);
+
        spin_unlock(&req->rq_lock);
 
        ptlrpc_req_finished(req);
@@ -104,7 +106,7 @@ void reply_in_callback(lnet_event_t *ev)
        req->rq_receiving_reply = 0;
        req->rq_early = 0;
        if (ev->unlinked)
-               req->rq_reply_unlink = 0;
+               req->rq_reply_unlinked = 1;
 
        if (ev->status)
                goto out_wake;
@@ -118,7 +120,7 @@ void reply_in_callback(lnet_event_t *ev)
        if (ev->mlength < ev->rlength) {
                CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
                       req->rq_replen, ev->rlength, ev->offset);
-               req->rq_reply_truncate = 1;
+               req->rq_reply_truncated = 1;
                req->rq_replied = 1;
                req->rq_status = -EOVERFLOW;
                req->rq_nob_received = ev->rlength + ev->offset;
@@ -135,7 +137,8 @@ void reply_in_callback(lnet_event_t *ev)
 
                req->rq_early_count++; /* number received, client side */
 
-               if (req->rq_replied)   /* already got the real reply */
+               /* already got the real reply or buffers are already unlinked */
+               if (req->rq_replied || req->rq_reply_unlinked == 1)
                        goto out_wake;
 
                req->rq_early = 1;
@@ -328,6 +331,7 @@ void request_in_callback(lnet_event_t *ev)
                }
        }
 
+       ptlrpc_srv_req_init(req);
        /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
         * flags are reset and scalars are zero.  We only set the message
         * size to non-zero if this was a successful receive.
@@ -341,10 +345,6 @@ void request_in_callback(lnet_event_t *ev)
        req->rq_self = ev->target.nid;
        req->rq_rqbd = rqbd;
        req->rq_phase = RQ_PHASE_NEW;
-       spin_lock_init(&req->rq_lock);
-       INIT_LIST_HEAD(&req->rq_timed_list);
-       INIT_LIST_HEAD(&req->rq_exp_list);
-       atomic_set(&req->rq_refcount, 1);
        if (ev->type == LNET_EVENT_PUT)
                CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
                       req, req->rq_xid, ev->mlength);
index a4f7544f46b89ff807516ae0b4cf52e5b1f67ca0..3292e6ea0102aa5ad9d28e842abe642fe3c0b377 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -360,9 +356,8 @@ void ptlrpc_invalidate_import(struct obd_import *imp)
                                                  "still on delayed list");
                                }
 
-                               CERROR("%s: RPCs in \"%s\" phase found (%d). Network is sluggish? Waiting them to error out.\n",
+                               CERROR("%s: Unregistering RPCs found (%d). Network is sluggish? Waiting them to error out.\n",
                                       cli_tgt,
-                                      ptlrpc_phase2str(RQ_PHASE_UNREGISTERING),
                                       atomic_read(&imp->
                                                   imp_unregistering));
                        }
@@ -698,7 +693,8 @@ int ptlrpc_connect_import(struct obd_import *imp)
 
        lustre_msg_add_op_flags(request->rq_reqmsg, MSG_CONNECT_NEXT_VER);
 
-       request->rq_no_resend = request->rq_no_delay = 1;
+       request->rq_no_resend = 1;
+       request->rq_no_delay = 1;
        request->rq_send_state = LUSTRE_IMP_CONNECTING;
        /* Allow a slightly larger reply for future growth compatibility */
        req_capsule_set_size(&request->rq_pill, &RMF_CONNECT_DATA, RCL_SERVER,
index c0ecd1625dc4054be2b67e9464f5bbdac7eab29b..ab5d8517424530bae3be1847e60958b5453b63f1 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -198,7 +194,7 @@ static const struct req_msg_field *mds_reint_create_slave_client[] = {
        &RMF_DLM_REQ
 };
 
-static const struct req_msg_field *mds_reint_create_rmt_acl_client[] = {
+static const struct req_msg_field *mds_reint_create_acl_client[] = {
        &RMF_PTLRPC_BODY,
        &RMF_REC_REINT,
        &RMF_CAPA1,
@@ -679,7 +675,7 @@ static struct req_format *req_formats[] = {
        &RQF_MDS_DONE_WRITING,
        &RQF_MDS_REINT,
        &RQF_MDS_REINT_CREATE,
-       &RQF_MDS_REINT_CREATE_RMT_ACL,
+       &RQF_MDS_REINT_CREATE_ACL,
        &RQF_MDS_REINT_CREATE_SLAVE,
        &RQF_MDS_REINT_CREATE_SYM,
        &RQF_MDS_REINT_OPEN,
@@ -1242,10 +1238,10 @@ struct req_format RQF_MDS_REINT_CREATE =
                        mds_reint_create_client, mdt_body_capa);
 EXPORT_SYMBOL(RQF_MDS_REINT_CREATE);
 
-struct req_format RQF_MDS_REINT_CREATE_RMT_ACL =
-       DEFINE_REQ_FMT0("MDS_REINT_CREATE_RMT_ACL",
-                       mds_reint_create_rmt_acl_client, mdt_body_capa);
-EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_RMT_ACL);
+struct req_format RQF_MDS_REINT_CREATE_ACL =
+       DEFINE_REQ_FMT0("MDS_REINT_CREATE_ACL",
+                       mds_reint_create_acl_client, mdt_body_capa);
+EXPORT_SYMBOL(RQF_MDS_REINT_CREATE_ACL);
 
 struct req_format RQF_MDS_REINT_CREATE_SLAVE =
        DEFINE_REQ_FMT0("MDS_REINT_CREATE_EA",
index a23ac5f9ae96eadacdd5e7ee58868e069001a5a1..0f55c01feba877da2615957555883a8e08ccbe03 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index fbccb62213b5217f5f98cc1523f447893a96a8cf..bccdace7e51fe4d70ab49882c56a543031a86cd8 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 64c0f1e17f36f89deef6dc41f8d4c35943bffa61..bc93b75744e1b466ebc5e781e3a28f95ebc78860 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -872,7 +868,8 @@ ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s,
 
                if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */
                        srhi->srhi_req = NULL;
-                       seq = srhi->srhi_seq = 0;
+                       seq = 0;
+                       srhi->srhi_seq = 0;
                } else { /* the next sequence */
                        seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits);
                }
@@ -1161,7 +1158,6 @@ void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
 
        lprocfs_counter_add(svc_stats, idx, bytes);
 }
-
 EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
 
 void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
index 10b8fe82a342cfb79f4d79a8cc98c852d82e97c5..11ec8254534741bd09026fb2f25b2e67198416ef 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -251,7 +247,7 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
 
        /* Let's setup deadline for reply unlink. */
        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
-           async && req->rq_bulk_deadline == 0)
+           async && req->rq_bulk_deadline == 0 && cfs_fail_val == 0)
                req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
 
        if (ptlrpc_client_bulk_active(req) == 0)        /* completed or */
@@ -270,7 +266,7 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
                return 1;                               /* never registered */
 
        /* Move to "Unregistering" phase as bulk was not unlinked yet. */
-       ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
+       ptlrpc_rqphase_move(req, RQ_PHASE_UNREG_BULK);
 
        /* Do not wait for unlink to finish. */
        if (async)
@@ -581,19 +577,18 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
        }
 
        spin_lock(&request->rq_lock);
-       /* If the MD attach succeeds, there _will_ be a reply_in callback */
-       request->rq_receiving_reply = !noreply;
-       request->rq_req_unlink = 1;
        /* We are responsible for unlinking the reply buffer */
-       request->rq_reply_unlink = !noreply;
+       request->rq_reply_unlinked = noreply;
+       request->rq_receiving_reply = !noreply;
        /* Clear any flags that may be present from previous sends. */
+       request->rq_req_unlinked = 0;
        request->rq_replied = 0;
        request->rq_err = 0;
        request->rq_timedout = 0;
        request->rq_net_err = 0;
        request->rq_resend = 0;
        request->rq_restart = 0;
-       request->rq_reply_truncate = 0;
+       request->rq_reply_truncated = 0;
        spin_unlock(&request->rq_lock);
 
        if (!noreply) {
@@ -608,7 +603,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                reply_md.user_ptr = &request->rq_reply_cbid;
                reply_md.eq_handle = ptlrpc_eq_h;
 
-               /* We must see the unlink callback to unset rq_reply_unlink,
+               /* We must see the unlink callback to set rq_reply_unlinked,
                 * so we can't auto-unlink
                 */
                rc = LNetMDAttach(reply_me_h, reply_md, LNET_RETAIN,
@@ -637,7 +632,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
 
        OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
 
-       ktime_get_real_ts64(&request->rq_arrival_time);
+       ktime_get_real_ts64(&request->rq_sent_tv);
        request->rq_sent = ktime_get_real_seconds();
        /* We give the server rq_timeout secs to process the req, and
         * add the network latency for our local timeout.
@@ -655,9 +650,10 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                          connection,
                          request->rq_request_portal,
                          request->rq_xid, 0);
-       if (rc == 0)
+       if (likely(rc == 0))
                goto out;
 
+       request->rq_req_unlinked = 1;
        ptlrpc_req_finished(request);
        if (noreply)
                goto out;
index c444f516856faf66a79091a38f9f91b00b909a92..d88faf61e740d560e00d02aa8c334bc98b653ad5 100644 (file)
@@ -769,7 +769,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
        spin_unlock(&nrs->nrs_lock);
 
        if (rc != 0)
-               (void) nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
+               (void)nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
 
        return rc;
 }
index 811acf6fc7860fd39b5b68ffea7948884111380e..b514f18fae5032f9788db0513427981160c41395 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -1806,19 +1802,6 @@ void lustre_swab_obd_quotactl(struct obd_quotactl *q)
 }
 EXPORT_SYMBOL(lustre_swab_obd_quotactl);
 
-void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p)
-{
-       __swab32s(&p->rp_uid);
-       __swab32s(&p->rp_gid);
-       __swab32s(&p->rp_fsuid);
-       __swab32s(&p->rp_fsuid_h);
-       __swab32s(&p->rp_fsgid);
-       __swab32s(&p->rp_fsgid_h);
-       __swab32s(&p->rp_access_perm);
-       __swab32s(&p->rp_padding);
-};
-EXPORT_SYMBOL(lustre_swab_mdt_remote_perm);
-
 void lustre_swab_fid2path(struct getinfo_fid2path *gf)
 {
        lustre_swab_lu_fid(&gf->gf_fid);
index ec3af109a1d7c4e55cbee7d166c23938b120e4cd..6c820e9441719c18632e2cec521b38a648b46b1c 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 8a869315c258459adda507cb455b791b5b177177..c0529d808d81c52edde0cbf3bf3b3d7848bc099b 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -57,7 +53,8 @@ ptlrpc_prep_ping(struct obd_import *imp)
                                        LUSTRE_OBD_VERSION, OBD_PING);
        if (req) {
                ptlrpc_request_set_replen(req);
-               req->rq_no_resend = req->rq_no_delay = 1;
+               req->rq_no_resend = 1;
+               req->rq_no_delay = 1;
        }
        return req;
 }
index 6ca26c98de1bb8c2086b8e28358a31279e83ae3c..a9831fab80f3bd3ee5585c9c8e36edb5d1dca843 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -292,4 +288,47 @@ static inline void ptlrpc_reqset_put(struct ptlrpc_request_set *set)
        if (atomic_dec_and_test(&set->set_refcount))
                kfree(set);
 }
+
+/** initialise ptlrpc common fields */
+static inline void ptlrpc_req_comm_init(struct ptlrpc_request *req)
+{
+       spin_lock_init(&req->rq_lock);
+       atomic_set(&req->rq_refcount, 1);
+       INIT_LIST_HEAD(&req->rq_list);
+       INIT_LIST_HEAD(&req->rq_replay_list);
+}
+
+/** initialise client side ptlrpc request */
+static inline void ptlrpc_cli_req_init(struct ptlrpc_request *req)
+{
+       struct ptlrpc_cli_req *cr = &req->rq_cli;
+
+       ptlrpc_req_comm_init(req);
+
+       req->rq_receiving_reply = 0;
+       req->rq_req_unlinked = 1;
+       req->rq_reply_unlinked = 1;
+
+       INIT_LIST_HEAD(&cr->cr_set_chain);
+       INIT_LIST_HEAD(&cr->cr_ctx_chain);
+       init_waitqueue_head(&cr->cr_reply_waitq);
+       init_waitqueue_head(&cr->cr_set_waitq);
+}
+
+/** initialise server side ptlrpc request */
+static inline void ptlrpc_srv_req_init(struct ptlrpc_request *req)
+{
+       struct ptlrpc_srv_req *sr = &req->rq_srv;
+
+       ptlrpc_req_comm_init(req);
+       req->rq_srv_req = 1;
+       INIT_LIST_HEAD(&sr->sr_exp_list);
+       INIT_LIST_HEAD(&sr->sr_timed_list);
+       INIT_LIST_HEAD(&sr->sr_hist_list);
+}
+
 #endif /* PTLRPC_INTERNAL_H */
index a8ec0e9d7b2ee7f296548d9b374aa761418ca279..a70d5843f30ebb65196876e2c6f4ffa81f91c365 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 76a355a9db8b9e1a8d3bbf2385beeda9dc8b3d8d..0a374b6c2f71c335ba4dab0a83efe9170e4331ce 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -161,9 +157,9 @@ static int ptlrpcd_users;
 
 void ptlrpcd_wake(struct ptlrpc_request *req)
 {
-       struct ptlrpc_request_set *rq_set = req->rq_set;
+       struct ptlrpc_request_set *set = req->rq_set;
 
-       wake_up(&rq_set->set_waitq);
+       wake_up(&set->set_waitq);
 }
 EXPORT_SYMBOL(ptlrpcd_wake);
 
index 30d9a164e52dcb5b8e91ded3fc455312cc371595..718b3a8d61c658ff4a5fb84a15c318721a75a360 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 187fd1d6898c36ce3e97dac4167199dd7075b85a..dbd819fa6b755ddeadd83a4e81e6a6342f5eea0a 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -867,11 +863,9 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
        if (!req)
                return -ENOMEM;
 
-       spin_lock_init(&req->rq_lock);
+       ptlrpc_cli_req_init(req);
        atomic_set(&req->rq_refcount, 10000);
-       INIT_LIST_HEAD(&req->rq_ctx_chain);
-       init_waitqueue_head(&req->rq_reply_waitq);
-       init_waitqueue_head(&req->rq_set_waitq);
+
        req->rq_import = imp;
        req->rq_flvr = sec->ps_flvr;
        req->rq_cli_ctx = ctx;
@@ -1051,6 +1045,8 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
        if (!early_req)
                return -ENOMEM;
 
+       ptlrpc_cli_req_init(early_req);
+
        early_size = req->rq_nob_received;
        early_bufsz = size_roundup_power2(early_size);
        early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS);
@@ -1099,12 +1095,11 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
        memcpy(early_buf, req->rq_repbuf, early_size);
        spin_unlock(&req->rq_lock);
 
-       spin_lock_init(&early_req->rq_lock);
        early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
        early_req->rq_flvr = req->rq_flvr;
        early_req->rq_repbuf = early_buf;
        early_req->rq_repbuf_len = early_bufsz;
-       early_req->rq_repdata = (struct lustre_msg *) early_buf;
+       early_req->rq_repdata = (struct lustre_msg *)early_buf;
        early_req->rq_repdata_len = early_size;
        early_req->rq_early = 1;
        early_req->rq_reqmsg = req->rq_reqmsg;
@@ -1556,7 +1551,7 @@ void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
        /* move from segment + 1 to end segment */
        LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
        oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
-       movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
+       movesize = oldmsg_size - ((unsigned long)src - (unsigned long)msg);
        LASSERT(movesize >= 0);
 
        if (movesize)
@@ -2196,6 +2191,9 @@ int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
 
        pud = lustre_msg_buf(msg, offset, 0);
 
+       if (!pud)
+               return -EINVAL;
+
        pud->pud_uid = from_kuid(&init_user_ns, current_uid());
        pud->pud_gid = from_kgid(&init_user_ns, current_gid());
        pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
index 02e6cda4c995ae1f5d977b245506cfb217c8ce36..5f4d797185895f107bce05f1e676a0014e393fd4 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -273,7 +269,7 @@ static unsigned long enc_pools_shrink_scan(struct shrinker *s,
 static inline
 int npages_to_npools(unsigned long npages)
 {
-       return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
+       return (int)((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
 }
 
 /*
index a51b18bbfd347b10fe337b730b7346ef311be1c8..c14035479c5f7cc43bd824de8ebf06b887d94ce9 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -648,7 +644,7 @@ static int logname2fsname(const char *logname, char *buf, int buflen)
                return -EINVAL;
        }
 
-       len = min((int) (ptr - logname), buflen - 1);
+       len = min((int)(ptr - logname), buflen - 1);
 
        memcpy(buf, logname, len);
        buf[len] = '\0';
@@ -819,7 +815,7 @@ void sptlrpc_conf_client_adapt(struct obd_device *obd)
        CDEBUG(D_SEC, "obd %s\n", obd->u.cli.cl_target_uuid.uuid);
 
        /* serialize with connect/disconnect import */
-       down_read(&obd->u.cli.cl_sem);
+       down_read_nested(&obd->u.cli.cl_sem, OBD_CLI_SEM_MDCOSC);
 
        imp = obd->u.cli.cl_import;
        if (imp) {
index 9082da06b28a16453be8fe5f78868a60a26d3585..9b9801ece582bdec62b49683270f0998ca31c8c7 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index e610a8ddd223b945b3ffbfa7434556cec55f161f..07273f57796914bc256cd8c1e7e75d40666f6d22 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index 40e5349de38cf39c499711365fe175808fd83030..70a61e12bb7b2b9037b9d7ed59cdee46fa8ace4d 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -60,7 +56,7 @@ static struct ptlrpc_svc_ctx    null_svc_ctx;
 static inline
 void null_encode_sec_part(struct lustre_msg *msg, enum lustre_sec_part sp)
 {
-       msg->lm_secflvr |= (((__u32) sp) & 0xFF) << 24;
+       msg->lm_secflvr |= (((__u32)sp) & 0xFF) << 24;
 }
 
 static inline
@@ -265,7 +261,8 @@ int null_enlarge_reqbuf(struct ptlrpc_sec *sec,
                memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen);
 
                kvfree(req->rq_reqbuf);
-               req->rq_reqbuf = req->rq_reqmsg = newbuf;
+               req->rq_reqbuf = newbuf;
+               req->rq_reqmsg = newbuf;
                req->rq_reqbuf_len = alloc_size;
 
                if (req->rq_import)
@@ -329,7 +326,7 @@ int null_alloc_rs(struct ptlrpc_request *req, int msgsize)
        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
 
-       rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+       rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);
        rs->rs_msg = rs->rs_repbuf;
 
index 37c9f4c453de0128f6329648f77b09d6a337693c..5c4590b0c5216ac42ae5d50bed20b20d5b5fb4e0 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -298,7 +294,7 @@ int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
        LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
 
        bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
-       token = (struct plain_bulk_token *) bsd->bsd_data;
+       token = (struct plain_bulk_token *)bsd->bsd_data;
 
        bsd->bsd_version = 0;
        bsd->bsd_flags = 0;
@@ -343,7 +339,7 @@ int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
        LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);
 
        bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
-       tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+       tokenv = (struct plain_bulk_token *)bsdv->bsd_data;
 
        if (req->rq_bulk_write) {
                if (bsdv->bsd_flags & BSD_FL_ERR)
@@ -574,8 +570,12 @@ int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
        lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
        req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);
 
-       if (req->rq_pack_udesc)
-               sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
+       if (req->rq_pack_udesc) {
+               int rc = sptlrpc_pack_user_desc(req->rq_reqbuf,
+                                             PLAIN_PACK_USER_OFF);
+               if (rc < 0)
+                       return rc;
+       }
 
        return 0;
 }
@@ -811,7 +811,7 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
 
        rs->rs_svc_ctx = req->rq_svc_ctx;
        atomic_inc(&req->rq_svc_ctx->sc_refcount);
-       rs->rs_repbuf = (struct lustre_msg *) (rs + 1);
+       rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
        rs->rs_repbuf_len = rs_size - sizeof(*rs);
 
        lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
@@ -891,7 +891,7 @@ int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
        LASSERT(req->rq_pack_bulk);
 
        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
-       tokenr = (struct plain_bulk_token *) bsdr->bsd_data;
+       tokenr = (struct plain_bulk_token *)bsdr->bsd_data;
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
 
        bsdv->bsd_version = 0;
@@ -926,7 +926,7 @@ int plain_svc_wrap_bulk(struct ptlrpc_request *req,
 
        bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
        bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
-       tokenv = (struct plain_bulk_token *) bsdv->bsd_data;
+       tokenv = (struct plain_bulk_token *)bsdv->bsd_data;
 
        bsdv->bsd_version = 0;
        bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
index 17c7b9749f67f3eefc55ab6eb5451a08a26fc711..4788c4940c2aba30a7669b6b60f28f377cb68403 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
index aacc8108391d9c4af387a06b54ecdadf3236f373..6cc2b2edf3fc5bda048d4d5fe46d4fa4e8062132 100644 (file)
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -1269,8 +1265,6 @@ void lustre_assert_wire_constants(void)
                 OBD_MD_FLXATTRRM);
        LASSERTF(OBD_MD_FLACL == (0x0000008000000000ULL), "found 0x%.16llxULL\n",
                 OBD_MD_FLACL);
-       LASSERTF(OBD_MD_FLRMTPERM == (0x0000010000000000ULL), "found 0x%.16llxULL\n",
-                OBD_MD_FLRMTPERM);
        LASSERTF(OBD_MD_FLMDSCAPA == (0x0000020000000000ULL), "found 0x%.16llxULL\n",
                 OBD_MD_FLMDSCAPA);
        LASSERTF(OBD_MD_FLOSSCAPA == (0x0000040000000000ULL), "found 0x%.16llxULL\n",
@@ -1281,14 +1275,6 @@ void lustre_assert_wire_constants(void)
                 OBD_MD_FLCROSSREF);
        LASSERTF(OBD_MD_FLGETATTRLOCK == (0x0000200000000000ULL), "found 0x%.16llxULL\n",
                 OBD_MD_FLGETATTRLOCK);
-       LASSERTF(OBD_MD_FLRMTLSETFACL == (0x0001000000000000ULL), "found 0x%.16llxULL\n",
-                OBD_MD_FLRMTLSETFACL);
-       LASSERTF(OBD_MD_FLRMTLGETFACL == (0x0002000000000000ULL), "found 0x%.16llxULL\n",
-                OBD_MD_FLRMTLGETFACL);
-       LASSERTF(OBD_MD_FLRMTRSETFACL == (0x0004000000000000ULL), "found 0x%.16llxULL\n",
-                OBD_MD_FLRMTRSETFACL);
-       LASSERTF(OBD_MD_FLRMTRGETFACL == (0x0008000000000000ULL), "found 0x%.16llxULL\n",
-                OBD_MD_FLRMTRGETFACL);
        LASSERTF(OBD_MD_FLDATAVERSION == (0x0010000000000000ULL), "found 0x%.16llxULL\n",
                 OBD_MD_FLDATAVERSION);
        CLASSERT(OBD_FL_INLINEDATA == 0x00000001);
@@ -1895,44 +1881,6 @@ void lustre_assert_wire_constants(void)
        LASSERTF((int)sizeof(((struct mdt_ioepoch *)0)->padding) == 4, "found %lld\n",
                 (long long)(int)sizeof(((struct mdt_ioepoch *)0)->padding));
 
-       /* Checks for struct mdt_remote_perm */
-       LASSERTF((int)sizeof(struct mdt_remote_perm) == 32, "found %lld\n",
-                (long long)(int)sizeof(struct mdt_remote_perm));
-       LASSERTF((int)offsetof(struct mdt_remote_perm, rp_uid) == 0, "found %lld\n",
-                (long long)(int)offsetof(struct mdt_remote_perm, rp_uid));
-       LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_uid) == 4, "found %lld\n",
-                (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_uid));
-       LASSERTF((int)offsetof(struct mdt_remote_perm, rp_gid) == 4, "found %lld\n",
-                (long long)(int)offsetof(struct mdt_remote_perm, rp_gid));
-       LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_gid) == 4, "found %lld\n",
-                (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_gid));
-       LASSERTF((int)offsetof(struct mdt_remote_perm, rp_fsuid) == 8, "found %lld\n",
-                (long long)(int)offsetof(struct mdt_remote_perm, rp_fsuid));
-       LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_fsuid) == 4, "found %lld\n",
-                (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_fsuid));
-       LASSERTF((int)offsetof(struct mdt_remote_perm, rp_fsgid) == 16, "found %lld\n",
-                (long long)(int)offsetof(struct mdt_remote_perm, rp_fsgid));
-       LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_fsgid) == 4, "found %lld\n",
-                (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_fsgid));
-       LASSERTF((int)offsetof(struct mdt_remote_perm, rp_access_perm) == 24, "found %lld\n",
-                (long long)(int)offsetof(struct mdt_remote_perm, rp_access_perm));
-       LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_access_perm) == 4, "found %lld\n",
-                (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_access_perm));
-       LASSERTF((int)offsetof(struct mdt_remote_perm, rp_padding) == 28, "found %lld\n",
-                (long long)(int)offsetof(struct mdt_remote_perm, rp_padding));
-       LASSERTF((int)sizeof(((struct mdt_remote_perm *)0)->rp_padding) == 4, "found %lld\n",
-                (long long)(int)sizeof(((struct mdt_remote_perm *)0)->rp_padding));
-       LASSERTF(CFS_SETUID_PERM == 0x00000001UL, "found 0x%.8xUL\n",
-               (unsigned)CFS_SETUID_PERM);
-       LASSERTF(CFS_SETGID_PERM == 0x00000002UL, "found 0x%.8xUL\n",
-               (unsigned)CFS_SETGID_PERM);
-       LASSERTF(CFS_SETGRP_PERM == 0x00000004UL, "found 0x%.8xUL\n",
-               (unsigned)CFS_SETGRP_PERM);
-       LASSERTF(CFS_RMTACL_PERM == 0x00000008UL, "found 0x%.8xUL\n",
-               (unsigned)CFS_RMTACL_PERM);
-       LASSERTF(CFS_RMTOWN_PERM == 0x00000010UL, "found 0x%.8xUL\n",
-               (unsigned)CFS_RMTOWN_PERM);
-
        /* Checks for struct mdt_rec_setattr */
        LASSERTF((int)sizeof(struct mdt_rec_setattr) == 136, "found %lld\n",
                 (long long)(int)sizeof(struct mdt_rec_setattr));
index 873e2cf312176cb1bceed54e27ec10d6909b9cd9..20206ba965affe333423de87fe6b470a026953ba 100644 (file)
@@ -294,6 +294,14 @@ Description:
                Controls extended attributes client-side cache.
                1 to enable, 0 to disable.
 
+What:          /sys/fs/lustre/llite/<fsname>-<uuid>/unstable_stats
+Date:          Apr 2016
+Contact:       "Oleg Drokin" <oleg.drokin@intel.com>
+Description:
+               Shows number of pages that were sent and acknowledged by
+               server but were not yet committed and therefore still
+               pinned in client memory even though no longer dirty.
+
 What:          /sys/fs/lustre/ldlm/cancel_unused_locks_before_replay
 Date:          May 2015
 Contact:       "Oleg Drokin" <oleg.drokin@intel.com>
index 68931e5ecd8f48adf503d9fabcdca795d2966c25..09e9499b7f9d79b6805c684ff5d6a617e1fcf250 100644 (file)
@@ -1799,8 +1799,8 @@ struct ieee80211_device {
        short scanning;
        short proto_started;
 
-       struct semaphore wx_sem;
-       struct semaphore scan_sem;
+       struct mutex wx_mutex;
+       struct mutex scan_mutex;
 
        spinlock_t mgmt_tx_lock;
        spinlock_t beacon_lock;
index d705595766a9d8670cec9b029ce466798398973f..49db1b75cd05a0c7936df55add4dcce71cbb61d9 100644 (file)
@@ -427,7 +427,7 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
        short ch = 0;
        u8 channel_map[MAX_CHANNEL_NUMBER+1];
        memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
-       down(&ieee->scan_sem);
+       mutex_lock(&ieee->scan_mutex);
 
        while(1)
        {
@@ -475,13 +475,13 @@ void ieee80211_softmac_scan_syncro(struct ieee80211_device *ieee)
 out:
        if(ieee->state < IEEE80211_LINKED){
                ieee->actscanning = false;
-               up(&ieee->scan_sem);
+               mutex_unlock(&ieee->scan_mutex);
        }
        else{
        ieee->sync_scan_hurryup = 0;
        if(IS_DOT11D_ENABLE(ieee))
                DOT11D_ScanComplete(ieee);
-       up(&ieee->scan_sem);
+       mutex_unlock(&ieee->scan_mutex);
 }
 }
 EXPORT_SYMBOL(ieee80211_softmac_scan_syncro);
@@ -495,7 +495,7 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
        memcpy(channel_map, GET_DOT11D_INFO(ieee)->channel_map, MAX_CHANNEL_NUMBER+1);
        if(!ieee->ieee_up)
                return;
-       down(&ieee->scan_sem);
+       mutex_lock(&ieee->scan_mutex);
        do{
                ieee->current_network.channel =
                        (ieee->current_network.channel + 1) % MAX_CHANNEL_NUMBER;
@@ -517,7 +517,7 @@ static void ieee80211_softmac_scan_wq(struct work_struct *work)
 
        schedule_delayed_work(&ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME);
 
-       up(&ieee->scan_sem);
+       mutex_unlock(&ieee->scan_mutex);
        return;
 out:
        if(IS_DOT11D_ENABLE(ieee))
@@ -525,7 +525,7 @@ out:
        ieee->actscanning = false;
        watchdog = 0;
        ieee->scanning = 0;
-       up(&ieee->scan_sem);
+       mutex_unlock(&ieee->scan_mutex);
 }
 
 
@@ -579,7 +579,7 @@ static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
 
        //ieee->sync_scan_hurryup = 1;
 
-       down(&ieee->scan_sem);
+       mutex_lock(&ieee->scan_mutex);
 //     spin_lock_irqsave(&ieee->lock, flags);
 
        if (ieee->scanning == 1) {
@@ -589,7 +589,7 @@ static void ieee80211_softmac_stop_scan(struct ieee80211_device *ieee)
        }
 
 //     spin_unlock_irqrestore(&ieee->lock, flags);
-       up(&ieee->scan_sem);
+       mutex_unlock(&ieee->scan_mutex);
 }
 
 void ieee80211_stop_scan(struct ieee80211_device *ieee)
@@ -621,7 +621,7 @@ static void ieee80211_start_scan(struct ieee80211_device *ieee)
 
 }
 
-/* called with wx_sem held */
+/* called with wx_mutex held */
 void ieee80211_start_scan_syncro(struct ieee80211_device *ieee)
 {
        if (IS_DOT11D_ENABLE(ieee) )
@@ -1389,7 +1389,7 @@ static void ieee80211_associate_procedure_wq(struct work_struct *work)
 {
        struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, associate_procedure_wq);
        ieee->sync_scan_hurryup = 1;
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
 
        if (ieee->data_hard_stop)
                ieee->data_hard_stop(ieee->dev);
@@ -1402,7 +1402,7 @@ static void ieee80211_associate_procedure_wq(struct work_struct *work)
        ieee->associate_seq = 1;
        ieee80211_associate_step1(ieee);
 
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
 }
 
 inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee80211_network *net)
@@ -2331,7 +2331,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
        struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, start_ibss_wq);
        /* iwconfig mode ad-hoc will schedule this and return
         * on the other hand this will block further iwconfig SET
-        * operations because of the wx_sem hold.
+        * operations because of the wx_mutex hold.
         * Anyway some most set operations set a flag to speed-up
         * (abort) this wq (when syncro scanning) before sleeping
         * on the semaphore
@@ -2340,7 +2340,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
                printk("==========oh driver down return\n");
                return;
        }
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
 
        if (ieee->current_network.ssid_len == 0) {
                strcpy(ieee->current_network.ssid, IEEE80211_DEFAULT_TX_ESSID);
@@ -2431,7 +2431,7 @@ static void ieee80211_start_ibss_wq(struct work_struct *work)
                ieee->data_hard_resume(ieee->dev);
        netif_carrier_on(ieee->dev);
 
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
 }
 
 inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
@@ -2439,7 +2439,7 @@ inline void ieee80211_start_ibss(struct ieee80211_device *ieee)
        schedule_delayed_work(&ieee->start_ibss_wq, 150);
 }
 
-/* this is called only in user context, with wx_sem held */
+/* this is called only in user context, with wx_mutex held */
 void ieee80211_start_bss(struct ieee80211_device *ieee)
 {
        unsigned long flags;
@@ -2505,7 +2505,7 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
        struct ieee80211_device *ieee = container_of(dwork, struct ieee80211_device, associate_retry_wq);
        unsigned long flags;
 
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
        if(!ieee->proto_started)
                goto exit;
 
@@ -2537,7 +2537,7 @@ static void ieee80211_associate_retry_wq(struct work_struct *work)
        spin_unlock_irqrestore(&ieee->lock, flags);
 
 exit:
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
 }
 
 struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
@@ -2583,9 +2583,9 @@ EXPORT_SYMBOL(ieee80211_get_beacon);
 void ieee80211_softmac_stop_protocol(struct ieee80211_device *ieee)
 {
        ieee->sync_scan_hurryup = 1;
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
        ieee80211_stop_protocol(ieee);
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
 }
 EXPORT_SYMBOL(ieee80211_softmac_stop_protocol);
 
@@ -2609,9 +2609,9 @@ void ieee80211_stop_protocol(struct ieee80211_device *ieee)
 void ieee80211_softmac_start_protocol(struct ieee80211_device *ieee)
 {
        ieee->sync_scan_hurryup = 0;
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
        ieee80211_start_protocol(ieee);
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
 }
 EXPORT_SYMBOL(ieee80211_softmac_start_protocol);
 
@@ -2728,8 +2728,8 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
        INIT_WORK(&ieee->wx_sync_scan_wq, ieee80211_wx_sync_scan_wq);
 
 
-       sema_init(&ieee->wx_sem, 1);
-       sema_init(&ieee->scan_sem, 1);
+       mutex_init(&ieee->wx_mutex);
+       mutex_init(&ieee->scan_mutex);
 
        spin_lock_init(&ieee->mgmt_tx_lock);
        spin_lock_init(&ieee->beacon_lock);
@@ -2742,14 +2742,14 @@ void ieee80211_softmac_init(struct ieee80211_device *ieee)
 
 void ieee80211_softmac_free(struct ieee80211_device *ieee)
 {
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
        kfree(ieee->pDot11dInfo);
        ieee->pDot11dInfo = NULL;
        del_timer_sync(&ieee->associate_timer);
 
        cancel_delayed_work(&ieee->associate_retry_wq);
 
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
 }
 
 /********************************************************
@@ -3138,7 +3138,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
        struct ieee_param *param;
        int ret=0;
 
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
        //IEEE_DEBUG_INFO("wpa_supplicant: len=%d\n", p->length);
 
        if (p->length < sizeof(struct ieee_param) || !p->pointer) {
@@ -3183,7 +3183,7 @@ int ieee80211_wpa_supplicant_ioctl(struct ieee80211_device *ieee, struct iw_poin
 
        kfree(param);
 out:
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
 
        return ret;
 }
index aad288a1f9e3ab39fe2de6f21dac2fb11d224725..21bd0dc40888e13e438eefbf1a0e62f32d8c81b7 100644 (file)
@@ -34,7 +34,7 @@ int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info
        int ret;
        struct iw_freq *fwrq = &wrqu->freq;
 
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
 
        if (ieee->iw_mode == IW_MODE_INFRA) {
                ret = -EOPNOTSUPP;
@@ -79,7 +79,7 @@ int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info
 
        ret = 0;
 out:
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
        return ret;
 }
 EXPORT_SYMBOL(ieee80211_wx_set_freq);
@@ -145,7 +145,7 @@ int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
 
        ieee->sync_scan_hurryup = 1;
 
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
        /* use ifconfig hw ether */
        if (ieee->iw_mode == IW_MODE_MASTER) {
                ret = -1;
@@ -173,7 +173,7 @@ int ieee80211_wx_set_wap(struct ieee80211_device *ieee,
        if (ifup)
                ieee80211_start_protocol(ieee);
 out:
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
        return ret;
 }
 EXPORT_SYMBOL(ieee80211_wx_set_wap);
@@ -274,7 +274,7 @@ int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info
 
        ieee->sync_scan_hurryup = 1;
 
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
 
        if (wrqu->mode == ieee->iw_mode)
                goto out;
@@ -293,7 +293,7 @@ int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info
        }
 
 out:
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
        return 0;
 }
 EXPORT_SYMBOL(ieee80211_wx_set_mode);
@@ -353,7 +353,7 @@ void ieee80211_wx_sync_scan_wq(struct work_struct *work)
                ieee80211_start_send_beacons(ieee);
 
        netif_carrier_on(ieee->dev);
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
 
 }
 
@@ -362,7 +362,7 @@ int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info
 {
        int ret = 0;
 
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
 
        if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)) {
                ret = -1;
@@ -376,7 +376,7 @@ int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info
        }
 
 out:
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
        return ret;
 }
 EXPORT_SYMBOL(ieee80211_wx_set_scan);
@@ -391,7 +391,7 @@ int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
        unsigned long flags;
 
        ieee->sync_scan_hurryup = 1;
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
 
        proto_started = ieee->proto_started;
 
@@ -430,7 +430,7 @@ int ieee80211_wx_set_essid(struct ieee80211_device *ieee,
        if (proto_started)
                ieee80211_start_protocol(ieee);
 out:
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
        return ret;
 }
 EXPORT_SYMBOL(ieee80211_wx_set_essid);
@@ -453,7 +453,7 @@ int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
        int enable = (parms[0] > 0);
        short prev = ieee->raw_tx;
 
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
 
        if (enable)
                ieee->raw_tx = 1;
@@ -475,7 +475,7 @@ int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee,
                        netif_carrier_off(ieee->dev);
        }
 
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
 
        return 0;
 }
@@ -514,7 +514,7 @@ int ieee80211_wx_set_power(struct ieee80211_device *ieee,
 {
        int ret = 0;
 
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
 
        if (wrqu->power.disabled) {
                ieee->ps = IEEE80211_PS_DISABLED;
@@ -553,7 +553,7 @@ int ieee80211_wx_set_power(struct ieee80211_device *ieee,
 
        }
 exit:
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
        return ret;
 
 }
@@ -564,7 +564,7 @@ int ieee80211_wx_get_power(struct ieee80211_device *ieee,
                                 struct iw_request_info *info,
                                 union iwreq_data *wrqu, char *extra)
 {
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
 
        if (ieee->ps == IEEE80211_PS_DISABLED) {
                wrqu->power.disabled = 1;
@@ -592,7 +592,7 @@ int ieee80211_wx_get_power(struct ieee80211_device *ieee,
                wrqu->power.flags |= IW_POWER_UNICAST_R;
 
 exit:
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
        return 0;
 
 }
index 208be5fc527ae18e77ece946c25df9c05254e568..563d7fed6e1c37b222a3d92e53038a217353988c 100644 (file)
@@ -253,7 +253,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
        int i = 0;
        int err = 0;
        IEEE80211_DEBUG_WX("Getting scan\n");
-       down(&ieee->wx_sem);
+       mutex_lock(&ieee->wx_mutex);
        spin_lock_irqsave(&ieee->lock, flags);
 
        list_for_each_entry(network, &ieee->network_list, list) {
@@ -262,7 +262,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
                {
                        err = -E2BIG;
                        break;
-                                                                                               }
+               }
                if (ieee->scan_age == 0 ||
                    time_after(network->last_scanned + ieee->scan_age, jiffies))
                        ev = rtl819x_translate_scan(ieee, ev, stop, network, info);
@@ -277,7 +277,7 @@ int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
        }
 
        spin_unlock_irqrestore(&ieee->lock, flags);
-       up(&ieee->wx_sem);
+       mutex_unlock(&ieee->wx_mutex);
        wrqu->data.length = ev -  extra;
        wrqu->data.flags = 0;
 
index 97d9b3f49114143c74d69f63d6e427d4b8f7537f..f35defc36fd9d53587c1fd1a86dc5ec0784f78f0 100644 (file)
 static void eprom_cs(struct net_device *dev, short bit)
 {
        u8 cmdreg;
+       int err;
 
-       read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+       err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+       if (err)
+               return;
        if (bit)
                /* enable EPROM */
                write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_CS_BIT);
@@ -40,8 +43,11 @@ static void eprom_cs(struct net_device *dev, short bit)
 static void eprom_ck_cycle(struct net_device *dev)
 {
        u8 cmdreg;
+       int err;
 
-       read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+       err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+       if (err)
+               return;
        write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_CK_BIT);
        force_pci_posting(dev);
        udelay(EPROM_DELAY);
@@ -56,8 +62,11 @@ static void eprom_ck_cycle(struct net_device *dev)
 static void eprom_w(struct net_device *dev, short bit)
 {
        u8 cmdreg;
+       int err;
 
-       read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+       err = read_nic_byte_E(dev, EPROM_CMD, &cmdreg);
+       if (err)
+               return;
        if (bit)
                write_nic_byte_E(dev, EPROM_CMD, cmdreg | EPROM_W_BIT);
        else
@@ -71,8 +80,12 @@ static void eprom_w(struct net_device *dev, short bit)
 static short eprom_r(struct net_device *dev)
 {
        u8 bit;
+       int err;
+
+       err = read_nic_byte_E(dev, EPROM_CMD, &bit);
+       if (err)
+               return err;
 
-       read_nic_byte_E(dev, EPROM_CMD, &bit);
        udelay(EPROM_DELAY);
 
        if (bit & EPROM_R_BIT)
@@ -93,7 +106,7 @@ static void eprom_send_bits_string(struct net_device *dev, short b[], int len)
 }
 
 
-u32 eprom_read(struct net_device *dev, u32 addr)
+int eprom_read(struct net_device *dev, u32 addr)
 {
        struct r8192_priv *priv = ieee80211_priv(dev);
        short read_cmd[] = {1, 1, 0};
@@ -101,6 +114,7 @@ u32 eprom_read(struct net_device *dev, u32 addr)
        int i;
        int addr_len;
        u32 ret;
+       int err;
 
        ret = 0;
        /* enable EPROM programming */
@@ -144,7 +158,11 @@ u32 eprom_read(struct net_device *dev, u32 addr)
                 * and reading data. (eeprom outs a dummy 0)
                 */
                eprom_ck_cycle(dev);
-               ret |= (eprom_r(dev)<<(15-i));
+               err = eprom_r(dev);
+               if (err < 0)
+                       return err;
+
+               ret |= err<<(15-i);
        }
 
        eprom_cs(dev, 0);
index b840348eb5e3f7d11dca3d3e0d5988aa51048f02..9cf7f587c3ab167efb9c489f14675ff314b2ecd1 100644 (file)
@@ -40,4 +40,4 @@
 #define EPROM_TXPW1 0x3d
 
 
-u32 eprom_read(struct net_device *dev, u32 addr); /* reads a 16 bits word */
+int eprom_read(struct net_device *dev, u32 addr); /* reads a 16 bits word */
index ee1c72267811ee46b54d9cb891e5dbc661f9aacf..821afc0ddac5c5dbff612c9024d6c9cfcde9f156 100644 (file)
@@ -879,8 +879,7 @@ typedef struct r8192_priv {
        /* If 1, allow bad crc frame, reception in monitor mode */
        short crcmon;
 
-       struct semaphore wx_sem;
-       struct semaphore rf_sem;        /* Used to lock rf write operation */
+       struct mutex wx_mutex;
 
        u8 rf_type;                     /* 0: 1T2R, 1: 2T4R */
        RT_RF_TYPE_819xU rf_chip;
@@ -1129,10 +1128,10 @@ int read_nic_byte(struct net_device *dev, int x, u8 *data);
 int read_nic_byte_E(struct net_device *dev, int x, u8 *data);
 int read_nic_dword(struct net_device *dev, int x, u32 *data);
 int read_nic_word(struct net_device *dev, int x, u16 *data);
-void write_nic_byte(struct net_device *dev, int x, u8 y);
-void write_nic_byte_E(struct net_device *dev, int x, u8 y);
-void write_nic_word(struct net_device *dev, int x, u16 y);
-void write_nic_dword(struct net_device *dev, int x, u32 y);
+int write_nic_byte(struct net_device *dev, int x, u8 y);
+int write_nic_byte_E(struct net_device *dev, int x, u8 y);
+int write_nic_word(struct net_device *dev, int x, u16 y);
+int write_nic_dword(struct net_device *dev, int x, u32 y);
 void force_pci_posting(struct net_device *dev);
 
 void rtl8192_rtx_disable(struct net_device *);
index 8c1d73719147f0962b6f28ed279d04fd6e889d24..dd0970facdf5bd5bfbe80fdd48bb30faab178d08 100644 (file)
@@ -253,7 +253,7 @@ u32 read_cam(struct net_device *dev, u8 addr)
        return data;
 }
 
-void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
+int write_nic_byte_E(struct net_device *dev, int indx, u8 data)
 {
        int status;
        struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -261,7 +261,7 @@ void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
        u8 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
 
        if (!usbdata)
-               return;
+               return -ENOMEM;
        *usbdata = data;
 
        status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -269,9 +269,12 @@ void write_nic_byte_E(struct net_device *dev, int indx, u8 data)
                                 indx | 0xfe00, 0, usbdata, 1, HZ / 2);
        kfree(usbdata);
 
-       if (status < 0)
+       if (status < 0){
                netdev_err(dev, "write_nic_byte_E TimeOut! status: %d\n",
                           status);
+               return status;
+       }
+       return 0;
 }
 
 int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
@@ -299,7 +302,7 @@ int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
 }
 
 /* as 92U has extend page from 4 to 16, so modify functions below. */
-void write_nic_byte(struct net_device *dev, int indx, u8 data)
+int write_nic_byte(struct net_device *dev, int indx, u8 data)
 {
        int status;
 
@@ -308,7 +311,7 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
        u8 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
 
        if (!usbdata)
-               return;
+               return -ENOMEM;
        *usbdata = data;
 
        status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -317,12 +320,16 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
                                 usbdata, 1, HZ / 2);
        kfree(usbdata);
 
-       if (status < 0)
+       if (status < 0) {
                netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status);
+               return status;
+       }
+
+       return 0;
 }
 
 
-void write_nic_word(struct net_device *dev, int indx, u16 data)
+int write_nic_word(struct net_device *dev, int indx, u16 data)
 {
        int status;
 
@@ -331,7 +338,7 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
        u16 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
 
        if (!usbdata)
-               return;
+               return -ENOMEM;
        *usbdata = data;
 
        status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -340,12 +347,16 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
                                 usbdata, 2, HZ / 2);
        kfree(usbdata);
 
-       if (status < 0)
+       if (status < 0) {
                netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status);
+               return status;
+       }
+
+       return 0;
 }
 
 
-void write_nic_dword(struct net_device *dev, int indx, u32 data)
+int write_nic_dword(struct net_device *dev, int indx, u32 data)
 {
        int status;
 
@@ -354,7 +365,7 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
        u32 *usbdata = kzalloc(sizeof(data), GFP_KERNEL);
 
        if (!usbdata)
-               return;
+               return -ENOMEM;
        *usbdata = data;
 
        status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
@@ -364,9 +375,13 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
        kfree(usbdata);
 
 
-       if (status < 0)
+       if (status < 0) {
                netdev_err(dev, "write_nic_dword TimeOut! status: %d\n",
                           status);
+               return status;
+       }
+
+       return 0;
 }
 
 
@@ -2361,8 +2376,7 @@ static void rtl8192_init_priv_lock(struct r8192_priv *priv)
 {
        spin_lock_init(&priv->tx_lock);
        spin_lock_init(&priv->irq_lock);
-       sema_init(&priv->wx_sem, 1);
-       sema_init(&priv->rf_sem, 1);
+       mutex_init(&priv->wx_mutex);
        mutex_init(&priv->mutex);
 }
 
@@ -2421,7 +2435,7 @@ static inline u16 endian_swap(u16 *data)
        return *data;
 }
 
-static void rtl8192_read_eeprom_info(struct net_device *dev)
+static int rtl8192_read_eeprom_info(struct net_device *dev)
 {
        u16 wEPROM_ID = 0;
        u8 bMac_Tmp_Addr[6] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x02};
@@ -2429,9 +2443,13 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
        struct r8192_priv *priv = ieee80211_priv(dev);
        u16 tmpValue = 0;
        int i;
+       int ret;
 
        RT_TRACE(COMP_EPROM, "===========>%s()\n", __func__);
-       wEPROM_ID = eprom_read(dev, 0); /* first read EEPROM ID out; */
+       ret = eprom_read(dev, 0); /* first read EEPROM ID out; */
+       if (ret < 0)
+               return ret;
+       wEPROM_ID = (u16)ret;
        RT_TRACE(COMP_EPROM, "EEPROM ID is 0x%x\n", wEPROM_ID);
 
        if (wEPROM_ID != RTL8190_EEPROM_ID)
@@ -2443,13 +2461,25 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
 
        if (bLoad_From_EEPOM) {
                tmpValue = eprom_read(dev, EEPROM_VID >> 1);
+               ret = eprom_read(dev, EEPROM_VID >> 1);
+               if (ret < 0)
+                       return ret;
+               tmpValue = (u16)ret;
                priv->eeprom_vid = endian_swap(&tmpValue);
-               priv->eeprom_pid = eprom_read(dev, EEPROM_PID >> 1);
-               tmpValue = eprom_read(dev, EEPROM_ChannelPlan >> 1);
+               ret = eprom_read(dev, EEPROM_PID >> 1);
+               if (ret < 0)
+                       return ret;
+               priv->eeprom_pid = (u16)ret;
+               ret = eprom_read(dev, EEPROM_ChannelPlan >> 1);
+               if (ret < 0)
+                       return ret;
+               tmpValue = (u16)ret;
                priv->eeprom_ChannelPlan = (tmpValue & 0xff00) >> 8;
                priv->btxpowerdata_readfromEEPORM = true;
-               priv->eeprom_CustomerID =
-                       eprom_read(dev, (EEPROM_Customer_ID >> 1)) >> 8;
+               ret = eprom_read(dev, (EEPROM_Customer_ID >> 1)) >> 8;
+               if (ret < 0)
+                       return ret;
+               priv->eeprom_CustomerID = (u16)ret;
        } else {
                priv->eeprom_vid = 0;
                priv->eeprom_pid = 0;
@@ -2467,10 +2497,10 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
                int i;
 
                for (i = 0; i < 6; i += 2) {
-                       u16 tmp = 0;
-
-                       tmp = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
-                       *(u16 *)(&dev->dev_addr[i]) = tmp;
+                       ret = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
+                       if (ret < 0)
+                               return ret;
+                       *(u16 *)(&dev->dev_addr[i]) = (u16)ret;
                }
        } else {
                memcpy(dev->dev_addr, bMac_Tmp_Addr, 6);
@@ -2482,52 +2512,72 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
 
        if (priv->card_8192_version == (u8)VERSION_819xU_A) {
                /* read Tx power gain offset of legacy OFDM to HT rate */
-               if (bLoad_From_EEPOM)
-                       priv->EEPROMTxPowerDiff = (eprom_read(dev, (EEPROM_TxPowerDiff >> 1)) & 0xff00) >> 8;
-               else
+               if (bLoad_From_EEPOM) {
+                       ret = eprom_read(dev, (EEPROM_TxPowerDiff >> 1));
+                       if (ret < 0)
+                               return ret;
+                       priv->EEPROMTxPowerDiff = ((u16)ret & 0xff00) >> 8;
+               } else
                        priv->EEPROMTxPowerDiff = EEPROM_Default_TxPower;
                RT_TRACE(COMP_EPROM, "TxPowerDiff:%d\n", priv->EEPROMTxPowerDiff);
                /* read ThermalMeter from EEPROM */
-               if (bLoad_From_EEPOM)
-                       priv->EEPROMThermalMeter = (u8)(eprom_read(dev, (EEPROM_ThermalMeter >> 1)) & 0x00ff);
-               else
+               if (bLoad_From_EEPOM) {
+                       ret = eprom_read(dev, (EEPROM_ThermalMeter >> 1));
+                       if (ret < 0)
+                               return ret;
+                       priv->EEPROMThermalMeter = (u8)((u16)ret & 0x00ff);
+               } else
                        priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
                RT_TRACE(COMP_EPROM, "ThermalMeter:%d\n", priv->EEPROMThermalMeter);
                /* for tx power track */
                priv->TSSI_13dBm = priv->EEPROMThermalMeter * 100;
                /* read antenna tx power offset of B/C/D to A from EEPROM */
-               if (bLoad_From_EEPOM)
-                       priv->EEPROMPwDiff = (eprom_read(dev, (EEPROM_PwDiff >> 1)) & 0x0f00) >> 8;
-               else
+               if (bLoad_From_EEPOM) {
+                       ret = eprom_read(dev, (EEPROM_PwDiff >> 1));
+                       if (ret < 0)
+                               return ret;
+                       priv->EEPROMPwDiff = ((u16)ret & 0x0f00) >> 8;
+               } else
                        priv->EEPROMPwDiff = EEPROM_Default_PwDiff;
                RT_TRACE(COMP_EPROM, "TxPwDiff:%d\n", priv->EEPROMPwDiff);
                /* Read CrystalCap from EEPROM */
-               if (bLoad_From_EEPOM)
-                       priv->EEPROMCrystalCap = (eprom_read(dev, (EEPROM_CrystalCap >> 1)) & 0x0f);
-               else
+               if (bLoad_From_EEPOM) {
+                       ret = eprom_read(dev, (EEPROM_CrystalCap >> 1));
+                       if (ret < 0)
+                               return ret;
+                       priv->EEPROMCrystalCap = (u16)ret & 0x0f;
+               } else
                        priv->EEPROMCrystalCap = EEPROM_Default_CrystalCap;
                RT_TRACE(COMP_EPROM, "CrystalCap = %d\n", priv->EEPROMCrystalCap);
                /* get per-channel Tx power level */
-               if (bLoad_From_EEPOM)
-                       priv->EEPROM_Def_Ver = (eprom_read(dev, (EEPROM_TxPwIndex_Ver >> 1)) & 0xff00) >> 8;
-               else
+               if (bLoad_From_EEPOM) {
+                       ret = eprom_read(dev, (EEPROM_TxPwIndex_Ver >> 1));
+                       if (ret < 0)
+                               return ret;
+                       priv->EEPROM_Def_Ver = ((u16)ret & 0xff00) >> 8;
+               } else
                        priv->EEPROM_Def_Ver = 1;
                RT_TRACE(COMP_EPROM, "EEPROM_DEF_VER:%d\n", priv->EEPROM_Def_Ver);
                if (priv->EEPROM_Def_Ver == 0) { /* old eeprom definition */
                        int i;
 
-                       if (bLoad_From_EEPOM)
-                               priv->EEPROMTxPowerLevelCCK = (eprom_read(dev, (EEPROM_TxPwIndex_CCK >> 1)) & 0xff) >> 8;
-                       else
+                       if (bLoad_From_EEPOM) {
+                               ret = eprom_read(dev, (EEPROM_TxPwIndex_CCK >> 1));
+                               if (ret < 0)
+                                       return ret;
+                               priv->EEPROMTxPowerLevelCCK = ((u16)ret & 0xff00) >> 8; /* was (&0xff)>>8: always 0 */
+                       } else
                                priv->EEPROMTxPowerLevelCCK = 0x10;
                        RT_TRACE(COMP_EPROM, "CCK Tx Power Levl: 0x%02x\n", priv->EEPROMTxPowerLevelCCK);
                        for (i = 0; i < 3; i++) {
                                if (bLoad_From_EEPOM) {
-                                       tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G + i) >> 1);
+                                       ret = eprom_read(dev, (EEPROM_TxPwIndex_OFDM_24G + i) >> 1);
+                                       if (ret < 0)
+                                               return ret;
                                        if (((EEPROM_TxPwIndex_OFDM_24G + i) % 2) == 0)
-                                               tmpValue = tmpValue & 0x00ff;
+                                               tmpValue = (u16)ret & 0x00ff;
                                        else
-                                               tmpValue = (tmpValue & 0xff00) >> 8;
+                                               tmpValue = ((u16)ret & 0xff00) >> 8;
                                } else {
                                        tmpValue = 0x10;
                                }
@@ -2536,17 +2586,21 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
                        }
                } else if (priv->EEPROM_Def_Ver == 1) {
                        if (bLoad_From_EEPOM) {
-                               tmpValue = eprom_read(dev,
-                                               EEPROM_TxPwIndex_CCK_V1 >> 1);
-                               tmpValue = (tmpValue & 0xff00) >> 8;
+                               ret = eprom_read(dev, EEPROM_TxPwIndex_CCK_V1 >> 1);
+                               if (ret < 0)
+                                       return ret;
+                               tmpValue = ((u16)ret & 0xff00) >> 8;
                        } else {
                                tmpValue = 0x10;
                        }
                        priv->EEPROMTxPowerLevelCCK_V1[0] = (u8)tmpValue;
 
-                       if (bLoad_From_EEPOM)
-                               tmpValue = eprom_read(dev, (EEPROM_TxPwIndex_CCK_V1 + 2) >> 1);
-                       else
+                       if (bLoad_From_EEPOM) {
+                               ret = eprom_read(dev, (EEPROM_TxPwIndex_CCK_V1 + 2) >> 1);
+                               if (ret < 0)
+                                       return ret;
+                               tmpValue = (u16)ret;
+                       } else
                                tmpValue = 0x1010;
                        *((u16 *)(&priv->EEPROMTxPowerLevelCCK_V1[1])) = tmpValue;
                        if (bLoad_From_EEPOM)
@@ -2644,6 +2698,8 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
        init_rate_adaptive(dev);
 
        RT_TRACE(COMP_EPROM, "<===========%s()\n", __func__);
+
+       return 0;
 }
 
 static short rtl8192_get_channel_map(struct net_device *dev)
@@ -2664,6 +2720,7 @@ static short rtl8192_get_channel_map(struct net_device *dev)
 static short rtl8192_init(struct net_device *dev)
 {
        struct r8192_priv *priv = ieee80211_priv(dev);
+       int err;
 
        memset(&(priv->stats), 0, sizeof(struct Stats));
        memset(priv->txqueue_to_outpipemap, 0, 9);
@@ -2685,7 +2742,14 @@ static short rtl8192_init(struct net_device *dev)
        rtl8192_init_priv_lock(priv);
        rtl8192_init_priv_task(dev);
        rtl8192_get_eeprom_size(dev);
-       rtl8192_read_eeprom_info(dev);
+       err = rtl8192_read_eeprom_info(dev);
+       if (err) {
+               DMESG("Reading EEPROM info failed");
+               kfree(priv->pFirmware);
+               priv->pFirmware = NULL;
+               free_ieee80211(dev);
+               return err;
+       }
        rtl8192_get_channel_map(dev);
        init_hal_dm(dev);
        setup_timer(&priv->watch_dog_timer, watch_dog_timer_callback,
@@ -3303,12 +3367,12 @@ RESET_START:
 
                /* Set the variable for reset. */
                priv->ResetProgress = RESET_TYPE_SILENT;
-               down(&priv->wx_sem);
+               mutex_lock(&priv->wx_mutex);
                if (priv->up == 0) {
                        RT_TRACE(COMP_ERR,
                                 "%s():the driver is not up! return\n",
                                 __func__);
-                       up(&priv->wx_sem);
+                       mutex_unlock(&priv->wx_mutex);
                        return;
                }
                priv->up = 0;
@@ -3323,19 +3387,19 @@ RESET_START:
 
                ieee->sync_scan_hurryup = 1;
                if (ieee->state == IEEE80211_LINKED) {
-                       down(&ieee->wx_sem);
+                       mutex_lock(&ieee->wx_mutex);
                        netdev_dbg(dev, "ieee->state is IEEE80211_LINKED\n");
                        ieee80211_stop_send_beacons(priv->ieee80211);
                        del_timer_sync(&ieee->associate_timer);
                        cancel_delayed_work(&ieee->associate_retry_wq);
                        ieee80211_stop_scan(ieee);
                        netif_carrier_off(dev);
-                       up(&ieee->wx_sem);
+                       mutex_unlock(&ieee->wx_mutex);
                } else {
                        netdev_dbg(dev, "ieee->state is NOT LINKED\n");
                        ieee80211_softmac_stop_protocol(priv->ieee80211);
                }
-               up(&priv->wx_sem);
+               mutex_unlock(&priv->wx_mutex);
                RT_TRACE(COMP_RESET,
                         "%s():<==========down process is finished\n",
                         __func__);
@@ -3533,9 +3597,9 @@ static int rtl8192_open(struct net_device *dev)
        struct r8192_priv *priv = ieee80211_priv(dev);
        int ret;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
        ret = rtl8192_up(dev);
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
        return ret;
 }
 
@@ -3556,11 +3620,11 @@ static int rtl8192_close(struct net_device *dev)
        struct r8192_priv *priv = ieee80211_priv(dev);
        int ret;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = rtl8192_down(dev);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return ret;
 }
@@ -3632,11 +3696,11 @@ static void rtl8192_restart(struct work_struct *work)
                                               reset_wq);
        struct net_device *dev = priv->ieee80211->dev;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        rtl8192_commit(dev);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 }
 
 static void r8192_set_multicast(struct net_device *dev)
@@ -3659,12 +3723,12 @@ static int r8192_set_mac_adr(struct net_device *dev, void *mac)
        struct r8192_priv *priv = ieee80211_priv(dev);
        struct sockaddr *addr = mac;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ether_addr_copy(dev->dev_addr, addr->sa_data);
 
        schedule_work(&priv->reset_wq);
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return 0;
 }
@@ -3681,7 +3745,7 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        struct iw_point *p = &wrq->u.data;
        struct ieee_param *ipw = NULL;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
 
        if (p->length < sizeof(struct ieee_param) || !p->pointer) {
@@ -3774,7 +3838,7 @@ static int rtl8192_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
        kfree(ipw);
        ipw = NULL;
 out:
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
        return ret;
 }
 
index 837704de3ea423bbb5e7cf43d9ad933bb611f8d3..d2f2f246063fecbd24279e39cc2618b139587e9b 100644 (file)
@@ -67,11 +67,11 @@ static int r8192_wx_set_rate(struct net_device *dev,
        int ret;
        struct r8192_priv *priv = ieee80211_priv(dev);
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = ieee80211_wx_set_rate(priv->ieee80211, info, wrqu, extra);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return ret;
 }
@@ -83,11 +83,11 @@ static int r8192_wx_set_rts(struct net_device *dev,
        int ret;
        struct r8192_priv *priv = ieee80211_priv(dev);
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = ieee80211_wx_set_rts(priv->ieee80211, info, wrqu, extra);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return ret;
 }
@@ -108,11 +108,11 @@ static int r8192_wx_set_power(struct net_device *dev,
        int ret;
        struct r8192_priv *priv = ieee80211_priv(dev);
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return ret;
 }
@@ -132,11 +132,11 @@ static int r8192_wx_force_reset(struct net_device *dev,
 {
        struct r8192_priv *priv = ieee80211_priv(dev);
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        netdev_dbg(dev, "%s(): force reset ! extra is %d\n", __func__, *extra);
        priv->force_reset = *extra;
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
        return 0;
 
 }
@@ -148,11 +148,11 @@ static int r8192_wx_set_rawtx(struct net_device *dev,
        struct r8192_priv *priv = ieee80211_priv(dev);
        int ret;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return ret;
 
@@ -166,7 +166,7 @@ static int r8192_wx_set_crcmon(struct net_device *dev,
        int *parms = (int *)extra;
        int enable = (parms[0] > 0);
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        if (enable)
                priv->crcmon = 1;
@@ -176,7 +176,7 @@ static int r8192_wx_set_crcmon(struct net_device *dev,
        DMESG("bad CRC in monitor mode are %s",
              priv->crcmon ? "accepted" : "rejected");
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return 0;
 }
@@ -187,13 +187,13 @@ static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
        struct r8192_priv *priv = ieee80211_priv(dev);
        int ret;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = ieee80211_wx_set_mode(priv->ieee80211, a, wrqu, b);
 
        rtl8192_set_rxconf(dev);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
        return ret;
 }
 
@@ -338,7 +338,7 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
                }
        }
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
        if (priv->ieee80211->state != IEEE80211_LINKED) {
                priv->ieee80211->scanning = 0;
                ieee80211_softmac_scan_syncro(priv->ieee80211);
@@ -346,7 +346,7 @@ static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
        } else {
                ret = ieee80211_wx_set_scan(priv->ieee80211, a, wrqu, b);
        }
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
        return ret;
 }
 
@@ -361,11 +361,11 @@ static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
        if (!priv->up)
                return -ENETDOWN;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = ieee80211_wx_get_scan(priv->ieee80211, a, wrqu, b);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return ret;
 }
@@ -377,11 +377,11 @@ static int r8192_wx_set_essid(struct net_device *dev,
        struct r8192_priv *priv = ieee80211_priv(dev);
        int ret;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return ret;
 }
@@ -393,11 +393,11 @@ static int r8192_wx_get_essid(struct net_device *dev,
        int ret;
        struct r8192_priv *priv = ieee80211_priv(dev);
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return ret;
 }
@@ -408,11 +408,11 @@ static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
        int ret;
        struct r8192_priv *priv = ieee80211_priv(dev);
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
        return ret;
 }
 
@@ -468,11 +468,11 @@ static int r8192_wx_set_wap(struct net_device *dev,
        int ret;
        struct r8192_priv *priv = ieee80211_priv(dev);
        /* struct sockaddr *temp = (struct sockaddr *)awrq; */
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        ret = ieee80211_wx_set_wap(priv->ieee80211, info, awrq, extra);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return ret;
 
@@ -515,12 +515,12 @@ static int r8192_wx_set_enc(struct net_device *dev,
        if (!priv->up)
                return -ENETDOWN;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        RT_TRACE(COMP_SEC, "Setting SW wep key");
        ret = ieee80211_wx_set_encode(priv->ieee80211, info, wrqu, key);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
 
 
@@ -619,7 +619,7 @@ static int r8192_wx_set_retry(struct net_device *dev,
        struct r8192_priv *priv = ieee80211_priv(dev);
        int err = 0;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
 
        if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
            wrqu->retry.disabled){
@@ -652,7 +652,7 @@ static int r8192_wx_set_retry(struct net_device *dev,
 
        rtl8192_commit(dev);
 exit:
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return err;
 }
@@ -701,7 +701,7 @@ static int r8192_wx_set_sens(struct net_device *dev,
        struct r8192_priv *priv = ieee80211_priv(dev);
        short err = 0;
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
        if (priv->rf_set_sens == NULL) {
                err = -1; /* we have not this support for this radio */
                goto exit;
@@ -712,7 +712,7 @@ static int r8192_wx_set_sens(struct net_device *dev,
                err = -EINVAL;
 
 exit:
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
 
        return err;
 }
@@ -727,7 +727,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
        struct ieee80211_device *ieee = priv->ieee80211;
 
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
        ret = ieee80211_wx_set_encode_ext(priv->ieee80211, info, wrqu, extra);
 
        {
@@ -790,7 +790,7 @@ static int r8192_wx_set_enc_ext(struct net_device *dev,
 
 end_hw_sec:
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
        return ret;
 
 }
@@ -801,9 +801,9 @@ static int r8192_wx_set_auth(struct net_device *dev,
        int ret = 0;
        struct r8192_priv *priv = ieee80211_priv(dev);
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
        ret = ieee80211_wx_set_auth(priv->ieee80211, info, &(data->param), extra);
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
        return ret;
 }
 
@@ -815,10 +815,10 @@ static int r8192_wx_set_mlme(struct net_device *dev,
        int ret = 0;
        struct r8192_priv *priv = ieee80211_priv(dev);
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
        ret = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra);
 
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
        return ret;
 }
 
@@ -829,9 +829,9 @@ static int r8192_wx_set_gen_ie(struct net_device *dev,
        int ret = 0;
        struct r8192_priv *priv = ieee80211_priv(dev);
 
-       down(&priv->wx_sem);
+       mutex_lock(&priv->wx_mutex);
        ret = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, data->data.length);
-       up(&priv->wx_sem);
+       mutex_unlock(&priv->wx_mutex);
        return ret;
 
 
index b08b6ecc8d31e26e6b23d4b30f737c539178233f..98ea7f381a3c845c05bd84150c2fa2f5607fc6de 100644 (file)
@@ -22,7 +22,7 @@ __unisys_vmcall_gnuc(unsigned long tuple, unsigned long reg_ebx,
 
        cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
        if (!(cpuid_ecx & 0x80000000))
-               return -1;
+               return -EPERM;
 
        __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
                "a"(tuple), "b"(reg_ebx), "c"(reg_ecx));
@@ -40,7 +40,7 @@ __unisys_extended_vmcall_gnuc(unsigned long long tuple,
 
        cpuid(0x00000001, &cpuid_eax, &cpuid_ebx, &cpuid_ecx, &cpuid_edx);
        if (!(cpuid_ecx & 0x80000000))
-               return -1;
+               return -EPERM;
 
        __asm__ __volatile__(".byte 0x00f, 0x001, 0x0c1" : "=a"(result) :
                "a"(tuple), "b"(reg_ebx), "c"(reg_ecx), "d"(reg_edx));
index 3a147dbbd7b53e39bdb25b6a9897495b0574ba5d..d32b8980a1cfef7e80ae820c8f434a6f2b16dbce 100644 (file)
@@ -876,10 +876,10 @@ write_vbus_chp_info(struct visorchannel *chan,
        int off = sizeof(struct channel_header) + hdr_info->chp_info_offset;
 
        if (hdr_info->chp_info_offset == 0)
-               return -1;
+               return -EFAULT;
 
        if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
-               return -1;
+               return -EFAULT;
        return 0;
 }
 
@@ -895,10 +895,10 @@ write_vbus_bus_info(struct visorchannel *chan,
        int off = sizeof(struct channel_header) + hdr_info->bus_info_offset;
 
        if (hdr_info->bus_info_offset == 0)
-               return -1;
+               return -EFAULT;
 
        if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
-               return -1;
+               return -EFAULT;
        return 0;
 }
 
@@ -915,10 +915,10 @@ write_vbus_dev_info(struct visorchannel *chan,
            (hdr_info->device_info_struct_bytes * devix);
 
        if (hdr_info->dev_info_offset == 0)
-               return -1;
+               return -EFAULT;
 
        if (visorchannel_write(chan, off, info, sizeof(*info)) < 0)
-               return -1;
+               return -EFAULT;
        return 0;
 }
 
index 5ba5936e2203de44e8d733669985561e96ed6e83..d248c946a13b169dc877e7f53b5c5393779d479e 100644 (file)
@@ -1613,7 +1613,7 @@ parahotplug_request_complete(int id, u16 active)
        }
 
        spin_unlock(&parahotplug_request_list_lock);
-       return -1;
+       return -EINVAL;
 }
 
 /*
index 6a4570d1064259d5d733fc92dc599a90c49c9eb6..120ba2097e02d072d31ca5a9ae31e22c63b21ac6 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/debugfs.h>
 #include <linux/skbuff.h>
 #include <linux/kthread.h>
+#include <linux/idr.h>
+#include <linux/seq_file.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
@@ -33,7 +35,6 @@
 #define MAX_BUF                        8192
 #define MAX_PENDING_REQUESTS   (MIN_NUMSIGNALS * 2)
 #define VISORHBA_ERROR_COUNT   30
-#define VISORHBA_OPEN_MAX      1
 
 static int visorhba_queue_command_lck(struct scsi_cmnd *scsicmd,
                                      void (*visorhba_cmnd_done)
@@ -50,14 +51,7 @@ static int visorhba_pause(struct visor_device *dev,
 static int visorhba_resume(struct visor_device *dev,
                           visorbus_state_complete_func complete_func);
 
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
-                                size_t len, loff_t *offset);
-static int set_no_disk_inquiry_result(unsigned char *buf,
-                                     size_t len, bool is_lun0);
 static struct dentry *visorhba_debugfs_dir;
-static const struct file_operations debugfs_info_fops = {
-       .read = info_debugfs_read,
-};
 
 /* GUIDS for HBA channel type supported by this driver */
 static struct visor_channeltype_descriptor visorhba_channel_types[] = {
@@ -99,14 +93,6 @@ struct scsipending {
        char cmdtype;           /* Type of pointer that is being stored */
 };
 
-/* Work Data for dar_work_queue */
-struct diskaddremove {
-       u8 add;                 /* 0-remove, 1-add */
-       struct Scsi_Host *shost; /* Scsi Host for this visorhba instance */
-       u32 channel, id, lun;   /* Disk Path */
-       struct diskaddremove *next;
-};
-
 /* Each scsi_host has a host_data area that contains this struct. */
 struct visorhba_devdata {
        struct Scsi_Host *scsihost;
@@ -133,14 +119,21 @@ struct visorhba_devdata {
        int devnum;
        struct task_struct *thread;
        int thread_wait_ms;
+
+       /*
+        * allows us to pass int handles back-and-forth between us and
+        * iovm, instead of raw pointers
+        */
+       struct idr idr;
+
+       struct dentry *debugfs_dir;
+       struct dentry *debugfs_info;
 };
 
 struct visorhba_devices_open {
        struct visorhba_devdata *devdata;
 };
 
-static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];
-
 #define for_each_vdisk_match(iter, list, match)                          \
        for (iter = &list->head; iter->next; iter = iter->next) \
                if ((iter->channel == match->channel) &&                  \
@@ -191,7 +184,7 @@ static void visor_thread_stop(struct task_struct *task)
  *     Partition so that it can be handled when it completes. If new is
  *     NULL it is assumed the entry refers only to the cmdrsp.
  *     Returns insert_location where entry was added,
- *     SCSI_MLQUEUE_DEVICE_BUSY if it can't
+ *     -EBUSY if it can't
  */
 static int add_scsipending_entry(struct visorhba_devdata *devdata,
                                 char cmdtype, void *new)
@@ -206,7 +199,7 @@ static int add_scsipending_entry(struct visorhba_devdata *devdata,
                insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
                if (insert_location == (int)devdata->nextinsert) {
                        spin_unlock_irqrestore(&devdata->privlock, flags);
-                       return -1;
+                       return -EBUSY;
                }
        }
 
@@ -268,6 +261,62 @@ static struct uiscmdrsp *get_scsipending_cmdrsp(struct visorhba_devdata *ddata,
        return NULL;
 }
 
+/**
+ *      simple_idr_get - associate a provided pointer with an int value
+ *                       1 <= value <= INT_MAX, and return this int value;
+ *                       the pointer value can be obtained later by passing
+ *                       this int value to idr_find()
+ *      @idrtable: the data object maintaining the pointer<-->int mappings
+ *      @p: the pointer value to be remembered
+ *      @lock: a spinlock used when exclusive access to idrtable is needed
+ */
+static unsigned int simple_idr_get(struct idr *idrtable, void *p,
+                                  spinlock_t *lock)
+{
+       int id;
+       unsigned long flags;
+
+       idr_preload(GFP_KERNEL);
+       spin_lock_irqsave(lock, flags);
+       id = idr_alloc(idrtable, p, 1, INT_MAX, GFP_NOWAIT);
+       spin_unlock_irqrestore(lock, flags);
+       idr_preload_end();
+       if (id < 0)
+               return 0;  /* failure */
+       return (unsigned int)(id);  /* idr_alloc() guarantees > 0 */
+}
+
+/**
+ *      setup_scsitaskmgmt_handles - stash the necessary handles so that the
+ *                                   completion processing logic for a taskmgmt
+ *                                   cmd will be able to find who to wake up
+ *                                   and where to stash the result
+ */
+static void setup_scsitaskmgmt_handles(struct idr *idrtable, spinlock_t *lock,
+                                      struct uiscmdrsp *cmdrsp,
+                                      wait_queue_head_t *event, int *result)
+{
+       /* specify the event that has to be triggered when this */
+       /* cmd is complete */
+       cmdrsp->scsitaskmgmt.notify_handle =
+               simple_idr_get(idrtable, event, lock);
+       cmdrsp->scsitaskmgmt.notifyresult_handle =
+               simple_idr_get(idrtable, result, lock);
+}
+
+/**
+ *      cleanup_scsitaskmgmt_handles - forget handles created by
+ *                                     setup_scsitaskmgmt_handles()
+ */
+static void cleanup_scsitaskmgmt_handles(struct idr *idrtable,
+                                        struct uiscmdrsp *cmdrsp)
+{
+       if (cmdrsp->scsitaskmgmt.notify_handle)
+               idr_remove(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
+       if (cmdrsp->scsitaskmgmt.notifyresult_handle)
+               idr_remove(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
+}
+
 /**
  *     forward_taskmgmt_command - send taskmegmt command to the Service
  *                                Partition
@@ -303,10 +352,8 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
 
        /* issue TASK_MGMT_ABORT_TASK */
        cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
-       /* specify the event that has to be triggered when this */
-       /* cmd is complete */
-       cmdrsp->scsitaskmgmt.notify_handle = (u64)&notifyevent;
-       cmdrsp->scsitaskmgmt.notifyresult_handle = (u64)&notifyresult;
+       setup_scsitaskmgmt_handles(&devdata->idr, &devdata->privlock, cmdrsp,
+                                  &notifyevent, &notifyresult);
 
        /* save destination */
        cmdrsp->scsitaskmgmt.tasktype = tasktype;
@@ -315,6 +362,8 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
        cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
        cmdrsp->scsitaskmgmt.handle = scsicmd_id;
 
+       dev_dbg(&scsidev->sdev_gendev,
+               "visorhba: initiating type=%d taskmgmt command\n", tasktype);
        if (!visorchannel_signalinsert(devdata->dev->visorchannel,
                                       IOCHAN_TO_IOPART,
                                       cmdrsp))
@@ -327,17 +376,23 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
                                msecs_to_jiffies(45000)))
                goto err_del_scsipending_ent;
 
+       dev_dbg(&scsidev->sdev_gendev,
+               "visorhba: taskmgmt type=%d success; result=0x%x\n",
+                tasktype, notifyresult);
        if (tasktype == TASK_MGMT_ABORT_TASK)
                scsicmd->result = DID_ABORT << 16;
        else
                scsicmd->result = DID_RESET << 16;
 
        scsicmd->scsi_done(scsicmd);
-
+       cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
        return SUCCESS;
 
 err_del_scsipending_ent:
+       dev_dbg(&scsidev->sdev_gendev,
+               "visorhba: taskmgmt type=%d not executed\n", tasktype);
        del_scsipending_ent(devdata, scsicmd_id);
+       cleanup_scsitaskmgmt_handles(&devdata->idr, cmdrsp);
        return FAILED;
 }
 
@@ -606,64 +661,76 @@ static struct scsi_host_template visorhba_driver_template = {
 };
 
 /**
- *     info_debugfs_read - debugfs interface to dump visorhba states
- *     @file: Debug file
- *     @buf: buffer to send back to user
- *     @len: len that can be written to buf
- *     @offset: offset into buf
+ *     info_debugfs_show - debugfs interface to dump visorhba states
  *
- *     Dumps information about the visorhba driver and devices
- *     TODO: Make this per vhba
- *     Returns bytes_read
+ *      This presents a file in the debugfs tree named:
+ *          /visorhba/vbus<x>:dev<y>/info
  */
-static ssize_t info_debugfs_read(struct file *file, char __user *buf,
-                                size_t len, loff_t *offset)
+static int info_debugfs_show(struct seq_file *seq, void *v)
 {
-       ssize_t bytes_read = 0;
-       int str_pos = 0;
-       u64 phys_flags_addr;
-       int i;
-       struct visorhba_devdata *devdata;
-       char *vbuf;
+       struct visorhba_devdata *devdata = seq->private;
+
+       seq_printf(seq, "max_buff_len = %u\n", devdata->max_buff_len);
+       seq_printf(seq, "interrupts_rcvd = %llu\n", devdata->interrupts_rcvd);
+       seq_printf(seq, "interrupts_disabled = %llu\n",
+                  devdata->interrupts_disabled);
+       seq_printf(seq, "interrupts_notme = %llu\n",
+                  devdata->interrupts_notme);
+       seq_printf(seq, "flags_addr = %p\n", devdata->flags_addr);
+       if (devdata->flags_addr) {
+               u64 phys_flags_addr =
+                       virt_to_phys((__force  void *)devdata->flags_addr);
+               seq_printf(seq, "phys_flags_addr = 0x%016llx\n",
+                          phys_flags_addr);
+               seq_printf(seq, "FeatureFlags = %llu\n",
+                          (__le64)readq(devdata->flags_addr));
+       }
+       seq_printf(seq, "acquire_failed_cnt = %llu\n",
+                  devdata->acquire_failed_cnt);
 
-       if (len > MAX_BUF)
-               len = MAX_BUF;
-       vbuf = kzalloc(len, GFP_KERNEL);
-       if (!vbuf)
-               return -ENOMEM;
+       return 0;
+}
+
+static int info_debugfs_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, info_debugfs_show, inode->i_private);
+}
 
-       for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
-               if (!visorhbas_open[i].devdata)
-                       continue;
-
-               devdata = visorhbas_open[i].devdata;
-
-               str_pos += scnprintf(vbuf + str_pos,
-                               len - str_pos, "max_buff_len:%u\n",
-                               devdata->max_buff_len);
-
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                               "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
-                               devdata->interrupts_rcvd,
-                               devdata->interrupts_disabled);
-               str_pos += scnprintf(vbuf + str_pos,
-                               len - str_pos, "\ninterrupts_notme = %llu,\n",
-                               devdata->interrupts_notme);
-               phys_flags_addr = virt_to_phys((__force  void *)
-                                              devdata->flags_addr);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos,
-                               "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
-                               devdata->flags_addr, phys_flags_addr,
-                               (__le64)readq(devdata->flags_addr));
-               str_pos += scnprintf(vbuf + str_pos,
-                       len - str_pos, "acquire_failed_cnt:%llu\n",
-                       devdata->acquire_failed_cnt);
-               str_pos += scnprintf(vbuf + str_pos, len - str_pos, "\n");
+static const struct file_operations info_debugfs_fops = {
+       .owner = THIS_MODULE,
+       .open = info_debugfs_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+/**
+ *     complete_taskmgmt_command - complete task management
+ *     @cmdrsp: Response from the IOVM
+ *
+ *     Service Partition returned the result of the task management
+ *     command. Wake up anyone waiting for it.
+ *     Returns void
+ */
+static inline void complete_taskmgmt_command
+(struct idr *idrtable, struct uiscmdrsp *cmdrsp, int result)
+{
+       wait_queue_head_t *wq =
+               idr_find(idrtable, cmdrsp->scsitaskmgmt.notify_handle);
+       int *scsi_result_ptr =
+               idr_find(idrtable, cmdrsp->scsitaskmgmt.notifyresult_handle);
+
+       if (unlikely(!(wq && scsi_result_ptr))) {
+               pr_err("visorhba: no completion context; cmd will time out\n");
+               return;
        }
 
-       bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
-       kfree(vbuf);
-       return bytes_read;
+       /* copy the result of the taskmgmt and
+        * wake up the error handler that is waiting for this
+        */
+       pr_debug("visorhba: notifying initiator with result=0x%x\n", result);
+       *scsi_result_ptr = result;
+       wake_up_all(wq);
 }
 
 /**
@@ -701,17 +768,8 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
                        break;
                case CMD_SCSITASKMGMT_TYPE:
                        cmdrsp = pendingdel->sent;
-                       cmdrsp->scsitaskmgmt.notifyresult_handle
-                                                       = TASK_MGMT_FAILED;
-                       wake_up_all((wait_queue_head_t *)
-                                   cmdrsp->scsitaskmgmt.notify_handle);
-                       break;
-               case CMD_VDISKMGMT_TYPE:
-                       cmdrsp = pendingdel->sent;
-                       cmdrsp->vdiskmgmt.notifyresult_handle
-                                                       = VDISK_MGMT_FAILED;
-                       wake_up_all((wait_queue_head_t *)
-                                   cmdrsp->vdiskmgmt.notify_handle);
+                       complete_taskmgmt_command(&devdata->idr, cmdrsp,
+                                                 TASK_MGMT_FAILED);
                        break;
                default:
                        break;
@@ -878,89 +936,6 @@ complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
        scsicmd->scsi_done(scsicmd);
 }
 
-/* DELETE VDISK TASK MGMT COMMANDS */
-static inline void complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
-{
-       /* copy the result of the taskmgmt and
-        * wake up the error handler that is waiting for this
-        */
-       cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
-       wake_up_all((wait_queue_head_t *)cmdrsp->vdiskmgmt.notify_handle);
-}
-
-/**
- *     complete_taskmgmt_command - complete task management
- *     @cmdrsp: Response from the IOVM
- *
- *     Service Partition returned the result of the task management
- *     command. Wake up anyone waiting for it.
- *     Returns void
- */
-static inline void complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
-{
-       /* copy the result of the taskgmgt and
-        * wake up the error handler that is waiting for this
-        */
-       cmdrsp->vdiskmgmt.notifyresult_handle = cmdrsp->vdiskmgmt.result;
-       wake_up_all((wait_queue_head_t *)cmdrsp->scsitaskmgmt.notify_handle);
-}
-
-static struct work_struct dar_work_queue;
-static struct diskaddremove *dar_work_queue_head;
-static spinlock_t dar_work_queue_lock; /* Lock to protet dar_work_queue_head */
-static unsigned short dar_work_queue_sched;
-
-/**
- *     queue_disk_add_remove - IOSP has sent us a add/remove request
- *     @dar: disk add/remove request
- *
- *     Queue the work needed to add/remove a disk.
- *     Returns void
- */
-static inline void queue_disk_add_remove(struct diskaddremove *dar)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dar_work_queue_lock, flags);
-       if (!dar_work_queue_head) {
-               dar_work_queue_head = dar;
-               dar->next = NULL;
-       } else {
-               dar->next = dar_work_queue_head;
-               dar_work_queue_head = dar;
-       }
-       if (!dar_work_queue_sched) {
-               schedule_work(&dar_work_queue);
-               dar_work_queue_sched = 1;
-       }
-       spin_unlock_irqrestore(&dar_work_queue_lock, flags);
-}
-
-/**
- *     process_disk_notify - IOSP has sent a process disk notify event
- *     @shost: Scsi hot
- *     @cmdrsp: Response from the IOSP
- *
- *     Queue it to the work queue.
- *     Return void.
- */
-static void process_disk_notify(struct Scsi_Host *shost,
-                               struct uiscmdrsp *cmdrsp)
-{
-       struct diskaddremove *dar;
-
-       dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
-       if (!dar)
-               return;
-
-       dar->add = cmdrsp->disknotify.add;
-       dar->shost = shost;
-       dar->channel = cmdrsp->disknotify.channel;
-       dar->id = cmdrsp->disknotify.id;
-       dar->lun = cmdrsp->disknotify.lun;
-       queue_disk_add_remove(dar);
-}
-
 /**
  *     drain_queue - pull responses out of iochannel
  *     @cmdrsp: Response from the IOSP
@@ -973,7 +948,6 @@ static void
 drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
 {
        struct scsi_cmnd *scsicmd;
-       struct Scsi_Host *shost = devdata->scsihost;
 
        while (1) {
                if (!visorchannel_signalremove(devdata->dev->visorchannel,
@@ -995,21 +969,12 @@ drain_queue(struct uiscmdrsp *cmdrsp, struct visorhba_devdata *devdata)
                        if (!del_scsipending_ent(devdata,
                                                 cmdrsp->scsitaskmgmt.handle))
                                break;
-                       complete_taskmgmt_command(cmdrsp);
-               } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
-                       /* The vHba pointer has no meaning in a
-                        * guest partition. Let's be safe and set it
-                        * to NULL now. Do not use it here!
-                        */
-                       cmdrsp->disknotify.v_hba = NULL;
-                       process_disk_notify(shost, cmdrsp);
-               } else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
-                       if (!del_scsipending_ent(devdata,
-                                                cmdrsp->vdiskmgmt.handle))
-                               break;
-                       complete_vdiskmgmt_command(cmdrsp);
-               }
-               /* cmdrsp is now available for resuse */
+                       complete_taskmgmt_command(&devdata->idr, cmdrsp,
+                                                 cmdrsp->scsitaskmgmt.result);
+               } else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE)
+                       dev_err_once(&devdata->dev->device,
+                                    "ignoring unsupported NOTIFYGUEST\n");
+               /* cmdrsp is now available for re-use */
        }
 }
 
@@ -1107,7 +1072,7 @@ static int visorhba_probe(struct visor_device *dev)
        struct Scsi_Host *scsihost;
        struct vhba_config_max max;
        struct visorhba_devdata *devdata = NULL;
-       int i, err, channel_offset;
+       int err, channel_offset;
        u64 features;
 
        scsihost = scsi_host_alloc(&visorhba_driver_template,
@@ -1122,9 +1087,9 @@ static int visorhba_probe(struct visor_device *dev)
        if (err < 0)
                goto err_scsi_host_put;
 
-       scsihost->max_id = (unsigned)max.max_id;
-       scsihost->max_lun = (unsigned)max.max_lun;
-       scsihost->cmd_per_lun = (unsigned)max.cmd_per_lun;
+       scsihost->max_id = (unsigned int)max.max_id;
+       scsihost->max_lun = (unsigned int)max.max_lun;
+       scsihost->cmd_per_lun = (unsigned int)max.cmd_per_lun;
        scsihost->max_sectors =
            (unsigned short)(max.max_io_size >> 9);
        scsihost->sg_tablesize =
@@ -1136,16 +1101,24 @@ static int visorhba_probe(struct visor_device *dev)
                goto err_scsi_host_put;
 
        devdata = (struct visorhba_devdata *)scsihost->hostdata;
-       for (i = 0; i < VISORHBA_OPEN_MAX; i++) {
-               if (!visorhbas_open[i].devdata) {
-                       visorhbas_open[i].devdata = devdata;
-                       break;
-               }
-       }
-
        devdata->dev = dev;
        dev_set_drvdata(&dev->device, devdata);
 
+       devdata->debugfs_dir = debugfs_create_dir(dev_name(&dev->device),
+                                                 visorhba_debugfs_dir);
+       if (!devdata->debugfs_dir) {
+               err = -ENOMEM;
+               goto err_scsi_remove_host;
+       }
+       devdata->debugfs_info =
+               debugfs_create_file("info", S_IRUSR | S_IRGRP,
+                                   devdata->debugfs_dir, devdata,
+                                   &info_debugfs_fops);
+       if (!devdata->debugfs_info) {
+               err = -ENOMEM;
+               goto err_debugfs_dir;
+       }
+
        init_waitqueue_head(&devdata->rsp_queue);
        spin_lock_init(&devdata->privlock);
        devdata->serverdown = false;
@@ -1156,11 +1129,13 @@ static int visorhba_probe(struct visor_device *dev)
                                  channel_header.features);
        err = visorbus_read_channel(dev, channel_offset, &features, 8);
        if (err)
-               goto err_scsi_remove_host;
+               goto err_debugfs_info;
        features |= ULTRA_IO_CHANNEL_IS_POLLING;
        err = visorbus_write_channel(dev, channel_offset, &features, 8);
        if (err)
-               goto err_scsi_remove_host;
+               goto err_debugfs_info;
+
+       idr_init(&devdata->idr);
 
        devdata->thread_wait_ms = 2;
        devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
@@ -1170,6 +1145,12 @@ static int visorhba_probe(struct visor_device *dev)
 
        return 0;
 
+err_debugfs_info:
+       debugfs_remove(devdata->debugfs_info);
+
+err_debugfs_dir:
+       debugfs_remove_recursive(devdata->debugfs_dir);
+
 err_scsi_remove_host:
        scsi_remove_host(scsihost);
 
@@ -1198,7 +1179,11 @@ static void visorhba_remove(struct visor_device *dev)
        scsi_remove_host(scsihost);
        scsi_host_put(scsihost);
 
+       idr_destroy(&devdata->idr);
+
        dev_set_drvdata(&dev->device, NULL);
+       debugfs_remove(devdata->debugfs_info);
+       debugfs_remove_recursive(devdata->debugfs_dir);
 }
 
 /**
@@ -1209,26 +1194,17 @@ static void visorhba_remove(struct visor_device *dev)
  */
 static int visorhba_init(void)
 {
-       struct dentry *ret;
        int rc = -ENOMEM;
 
        visorhba_debugfs_dir = debugfs_create_dir("visorhba", NULL);
        if (!visorhba_debugfs_dir)
                return -ENOMEM;
 
-       ret = debugfs_create_file("info", S_IRUSR, visorhba_debugfs_dir, NULL,
-                                 &debugfs_info_fops);
-
-       if (!ret) {
-               rc = -EIO;
-               goto cleanup_debugfs;
-       }
-
        rc = visorbus_register_visor_driver(&visorhba_driver);
        if (rc)
                goto cleanup_debugfs;
 
-       return rc;
+       return 0;
 
 cleanup_debugfs:
        debugfs_remove_recursive(visorhba_debugfs_dir);
index 12a3570780fca79d523fc5bd15ff0b8b4e4a64b1..d67cd76327c02f6d2288632ee8b2f6fd7c98d884 100644 (file)
@@ -506,7 +506,7 @@ calc_button(int x)
        case 3:
                return BTN_RIGHT;
        default:
-               return -1;
+               return -EINVAL;
        }
 }
 
index fd7c9a6cb6f3ba5e1c87904f12ceee2c7dddc175..a28388d3ddc2aee9799b55600314aa6a8ed5277c 100644 (file)
@@ -1000,25 +1000,28 @@ visornic_set_multi(struct net_device *netdev)
        struct uiscmdrsp *cmdrsp;
        struct visornic_devdata *devdata = netdev_priv(netdev);
 
-       /* any filtering changes */
-       if (devdata->old_flags != netdev->flags) {
-               if ((netdev->flags & IFF_PROMISC) !=
-                   (devdata->old_flags & IFF_PROMISC)) {
-                       cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
-                       if (!cmdrsp)
-                               return;
-                       cmdrsp->cmdtype = CMD_NET_TYPE;
-                       cmdrsp->net.type = NET_RCV_PROMISC;
-                       cmdrsp->net.enbdis.context = netdev;
-                       cmdrsp->net.enbdis.enable =
-                               netdev->flags & IFF_PROMISC;
-                       visorchannel_signalinsert(devdata->dev->visorchannel,
-                                                 IOCHAN_TO_IOPART,
-                                                 cmdrsp);
-                       kfree(cmdrsp);
-               }
-               devdata->old_flags = netdev->flags;
-       }
+       if (devdata->old_flags == netdev->flags)
+               return;
+
+       if ((netdev->flags & IFF_PROMISC) ==
+           (devdata->old_flags & IFF_PROMISC))
+               goto out_save_flags;
+
+       cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
+       if (!cmdrsp)
+               return;
+       cmdrsp->cmdtype = CMD_NET_TYPE;
+       cmdrsp->net.type = NET_RCV_PROMISC;
+       cmdrsp->net.enbdis.context = netdev;
+       cmdrsp->net.enbdis.enable =
+               netdev->flags & IFF_PROMISC;
+       visorchannel_signalinsert(devdata->dev->visorchannel,
+                                 IOCHAN_TO_IOPART,
+                                 cmdrsp);
+       kfree(cmdrsp);
+
+out_save_flags:
+       devdata->old_flags = netdev->flags;
 }
 
 /**
@@ -1134,7 +1137,7 @@ repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
  *
  *     Got a receive packet back from the IO Part, handle it and send
  *     it up the stack.
- *     Returns void
+ *     Returns 1 iff an skb was received, otherwise 0
  */
 static int
 visornic_rx(struct uiscmdrsp *cmdrsp)
@@ -1145,7 +1148,6 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
        int cc, currsize, off;
        struct ethhdr *eth;
        unsigned long flags;
-       int rx_count = 0;
 
        /* post new rcv buf to the other end using the cmdrsp we have at hand
         * post it without holding lock - but we'll use the signal lock to
@@ -1177,7 +1179,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
                 */
                spin_unlock_irqrestore(&devdata->priv_lock, flags);
                repost_return(cmdrsp, devdata, skb, netdev);
-               return rx_count;
+               return 0;
        }
 
        spin_unlock_irqrestore(&devdata->priv_lock, flags);
@@ -1196,7 +1198,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
                        if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
                                dev_err(&devdata->netdev->dev,
                                        "repost_return failed");
-                       return rx_count;
+                       return 0;
                }
                /* length rcvd is greater than firstfrag in this skb rcv buf  */
                skb->tail += RCVPOST_BUF_SIZE;  /* amount in skb->data */
@@ -1212,7 +1214,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
                        if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
                                dev_err(&devdata->netdev->dev,
                                        "repost_return failed");
-                       return rx_count;
+                       return 0;
                }
                skb->tail += skb->len;
                skb->data_len = 0;      /* nothing rcvd in frag_list */
@@ -1231,7 +1233,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
        if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
                if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
                        dev_err(&devdata->netdev->dev, "repost_return failed");
-               return rx_count;
+               return 0;
        }
 
        if (cmdrsp->net.rcv.numrcvbufs > 1) {
@@ -1313,10 +1315,9 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
                /* drop packet - don't forward it up to OS */
                devdata->n_rcv_packets_not_accepted++;
                repost_return(cmdrsp, devdata, skb, netdev);
-               return rx_count;
+               return 0;
        } while (0);
 
-       rx_count++;
        netif_receive_skb(skb);
        /* netif_rx returns various values, but "in practice most drivers
         * ignore the return value
@@ -1329,7 +1330,7 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
         * new rcv buffer.
         */
        repost_return(cmdrsp, devdata, skb, netdev);
-       return rx_count;
+       return 1;
 }
 
 /**
@@ -1339,13 +1340,11 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
  *
  *     Setup initial values for the visornic based on channel and default
  *     values.
- *     Returns a pointer to the devdata if successful, else NULL
+ *     Returns a pointer to the devdata structure
  */
 static struct visornic_devdata *
 devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
 {
-       if (!devdata)
-               return NULL;
        devdata->dev = dev;
        devdata->incarnation_id = get_jiffies_64();
        return devdata;
@@ -1793,7 +1792,7 @@ static int visornic_probe(struct visor_device *dev)
                                  sizeof(struct sk_buff *), GFP_KERNEL);
        if (!devdata->rcvbuf) {
                err = -ENOMEM;
-               goto cleanup_rcvbuf;
+               goto cleanup_netdev;
        }
 
        /* set the net_xmit outstanding threshold */
@@ -1814,12 +1813,12 @@ static int visornic_probe(struct visor_device *dev)
        devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
        if (!devdata->cmdrsp_rcv) {
                err = -ENOMEM;
-               goto cleanup_cmdrsp_rcv;
+               goto cleanup_rcvbuf;
        }
        devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
        if (!devdata->xmit_cmdrsp) {
                err = -ENOMEM;
-               goto cleanup_xmit_cmdrsp;
+               goto cleanup_cmdrsp_rcv;
        }
        INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
        devdata->server_down = false;
@@ -2088,8 +2087,10 @@ static int visornic_init(void)
                goto cleanup_debugfs;
 
        err = visorbus_register_visor_driver(&visornic_driver);
-       if (!err)
-               return 0;
+       if (err)
+               goto cleanup_debugfs;
+
+       return 0;
 
 cleanup_debugfs:
        debugfs_remove_recursive(visornic_debugfs_dir);
index acc3f3e8481ba19e948c877781bd38e1627d331a..d226283143058f30abdae95040215676da1f3d74 100644 (file)
@@ -6,7 +6,6 @@ ccflags-y += -DFIRMWARE_1002=\"atmel/wilc1002_firmware.bin\" \
 ccflags-y += -I$(src)/ -DWILC_ASIC_A0 -DWILC_DEBUGFS
 
 wilc1000-objs := wilc_wfi_cfgoperations.o linux_wlan.o linux_mon.o \
-                       wilc_msgqueue.o \
                        coreconfigurator.o host_interface.o \
                        wilc_wlan_cfg.o wilc_debugfs.o \
                        wilc_wlan.o
index 95199d80a3e49d56ff61612fc1b3cae5c433d8b8..ec93b2ee0b08c9a937bc9ad68e68c55075b767a6 100644 (file)
@@ -4,6 +4,11 @@ TODO:
 - remove custom debug and tracing functions
 - rework comments and function headers(also coding style)
 - replace all semaphores with mutexes or completions
+- Move handling for each individual members of 'union message_body' out
+  into a separate 'struct work_struct' and completely remove the multiplexer
+  that is currently part of host_if_work(), allowing movement of the
+  implementation of each message handler into the callsite of the function
+  that currently queues the 'host_if_msg'.
 - make spi and sdio components coexist in one build
 - turn compile-time platform configuration (BEAGLE_BOARD,
   PANDA_BOARD, PLAT_WMS8304, PLAT_RKXXXX, CUSTOMER_PLATFORM, ...)
index 953584248e63bc18e9a6728ee899506d38cae11d..0b1760cba6e35a77fc391f44f9cfc7c263b9e254 100644 (file)
@@ -3,11 +3,14 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
 #include "host_interface.h"
+#include <linux/spinlock.h>
+#include <linux/errno.h>
 #include "coreconfigurator.h"
 #include "wilc_wlan.h"
 #include "wilc_wlan_if.h"
-#include "wilc_msgqueue.h"
 #include <linux/etherdevice.h>
 #include "wilc_wfi_netdevice.h"
 
@@ -181,7 +184,6 @@ union message_body {
        struct drv_handler drv;
        struct set_multicast multicast_info;
        struct op_mode mode;
-       struct set_mac_addr set_mac_info;
        struct get_mac_addr get_mac_info;
        struct ba_session_info session_info;
        struct remain_ch remain_on_ch;
@@ -195,6 +197,7 @@ struct host_if_msg {
        u16 id;
        union message_body body;
        struct wilc_vif *vif;
+       struct work_struct work;
 };
 
 struct join_bss_param {
@@ -229,8 +232,7 @@ struct join_bss_param {
 static struct host_if_drv *terminated_handle;
 bool wilc_optaining_ip;
 static u8 P2P_LISTEN_STATE;
-static struct task_struct *hif_thread_handler;
-static struct message_queue hif_msg_q;
+static struct workqueue_struct *hif_workqueue;
 static struct completion hif_thread_comp;
 static struct completion hif_driver_comp;
 static struct completion hif_wait_response;
@@ -264,6 +266,27 @@ static struct wilc_vif *join_req_vif;
 static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo);
 static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
 static s32 Handle_ScanDone(struct wilc_vif *vif, enum scan_event enuEvent);
+static void host_if_work(struct work_struct *work);
+
+/*!
+ *  @author            syounan
+ *  @date              1 Sep 2010
+ *  @note              copied from FLO glue implementation
+ *  @version           1.0
+ */
+static int wilc_enqueue_cmd(struct host_if_msg *msg)
+{
+       struct host_if_msg *new_msg;
+
+       new_msg = kmemdup(msg, sizeof(*new_msg), GFP_ATOMIC);
+       if (!new_msg)
+               return -ENOMEM;
+
+       INIT_WORK(&new_msg->work, host_if_work);
+       queue_work(hif_workqueue, &new_msg->work);
+       return 0;
+}
+
 
 /* The u8IfIdx starts from 0 to NUM_CONCURRENT_IFC -1, but 0 index used as
  * special purpose in wilc device, so we add 1 to the index to starts from 1.
@@ -417,10 +440,10 @@ static void handle_get_mac_address(struct wilc_vif *vif,
        complete(&hif_wait_response);
 }
 
-static s32 handle_cfg_param(struct wilc_vif *vif,
-                           struct cfg_param_attr *cfg_param_attr)
+static void handle_cfg_param(struct wilc_vif *vif,
+                            struct cfg_param_attr *cfg_param_attr)
 {
-       s32 result = 0;
+       int ret = 0;
        struct wid wid_list[32];
        struct host_if_drv *hif_drv = vif->hif_drv;
        int i = 0;
@@ -428,15 +451,16 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
        mutex_lock(&hif_drv->cfg_values_lock);
 
        if (cfg_param_attr->flag & BSS_TYPE) {
-               if (cfg_param_attr->bss_type < 6) {
+               u8 bss_type = cfg_param_attr->bss_type;
+
+               if (bss_type < 6) {
                        wid_list[i].id = WID_BSS_TYPE;
-                       wid_list[i].val = (s8 *)&cfg_param_attr->bss_type;
+                       wid_list[i].val = (s8 *)&bss_type;
                        wid_list[i].type = WID_CHAR;
                        wid_list[i].size = sizeof(char);
-                       hif_drv->cfg_values.bss_type = (u8)cfg_param_attr->bss_type;
+                       hif_drv->cfg_values.bss_type = bss_type;
                } else {
                        netdev_err(vif->ndev, "check value 6 over\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -452,7 +476,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.auth_type = (u8)cfg_param_attr->auth_type;
                } else {
                        netdev_err(vif->ndev, "Impossible value\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -467,7 +490,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.auth_timeout = cfg_param_attr->auth_timeout;
                } else {
                        netdev_err(vif->ndev, "Range(1 ~ 65535) over\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -481,7 +503,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.power_mgmt_mode = (u8)cfg_param_attr->power_mgmt_mode;
                } else {
                        netdev_err(vif->ndev, "Invalid power mode\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -496,7 +517,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.short_retry_limit = cfg_param_attr->short_retry_limit;
                } else {
                        netdev_err(vif->ndev, "Range(1~256) over\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -511,7 +531,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.long_retry_limit = cfg_param_attr->long_retry_limit;
                } else {
                        netdev_err(vif->ndev, "Range(1~256) over\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -526,7 +545,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.frag_threshold = cfg_param_attr->frag_threshold;
                } else {
                        netdev_err(vif->ndev, "Threshold Range fail\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -541,7 +559,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.rts_threshold = cfg_param_attr->rts_threshold;
                } else {
                        netdev_err(vif->ndev, "Threshold Range fail\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -555,7 +572,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.preamble_type = cfg_param_attr->preamble_type;
                } else {
                        netdev_err(vif->ndev, "Preamle Range(0~2) over\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -569,7 +585,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.short_slot_allowed = (u8)cfg_param_attr->short_slot_allowed;
                } else {
                        netdev_err(vif->ndev, "Short slot(2) over\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -583,7 +598,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.txop_prot_disabled = (u8)cfg_param_attr->txop_prot_disabled;
                } else {
                        netdev_err(vif->ndev, "TXOP prot disable\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -598,7 +612,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.beacon_interval = cfg_param_attr->beacon_interval;
                } else {
                        netdev_err(vif->ndev, "Beacon interval(1~65535)fail\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -613,7 +626,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.dtim_period = cfg_param_attr->dtim_period;
                } else {
                        netdev_err(vif->ndev, "DTIM range(1~255) fail\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -627,7 +639,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.site_survey_enabled = (u8)cfg_param_attr->site_survey_enabled;
                } else {
                        netdev_err(vif->ndev, "Site survey disable\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -642,7 +653,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.site_survey_scan_time = cfg_param_attr->site_survey_scan_time;
                } else {
                        netdev_err(vif->ndev, "Site scan time(1~65535) over\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -657,7 +667,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.active_scan_time = cfg_param_attr->active_scan_time;
                } else {
                        netdev_err(vif->ndev, "Active time(1~65535) over\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -672,7 +681,6 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.passive_scan_time = cfg_param_attr->passive_scan_time;
                } else {
                        netdev_err(vif->ndev, "Passive time(1~65535) over\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
@@ -694,21 +702,19 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
                        hif_drv->cfg_values.curr_tx_rate = (u8)curr_tx_rate;
                } else {
                        netdev_err(vif->ndev, "out of TX rate\n");
-                       result = -EINVAL;
                        goto unlock;
                }
                i++;
        }
 
-       result = wilc_send_config_pkt(vif, SET_CFG, wid_list,
-                                     i, wilc_get_vif_idx(vif));
+       ret = wilc_send_config_pkt(vif, SET_CFG, wid_list,
+                                  i, wilc_get_vif_idx(vif));
 
-       if (result)
+       if (ret)
                netdev_err(vif->ndev, "Error in setting CFG params\n");
 
 unlock:
        mutex_unlock(&hif_drv->cfg_values_lock);
-       return result;
 }
 
 static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
@@ -1231,17 +1237,14 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif,
                }
 
                for (i = 0; i < hif_drv->usr_scan_req.rcvd_ch_cnt; i++) {
-                       if ((hif_drv->usr_scan_req.net_info[i].bssid) &&
-                           (pstrNetworkInfo->bssid)) {
-                               if (memcmp(hif_drv->usr_scan_req.net_info[i].bssid,
-                                          pstrNetworkInfo->bssid, 6) == 0) {
-                                       if (pstrNetworkInfo->rssi <= hif_drv->usr_scan_req.net_info[i].rssi) {
-                                               goto done;
-                                       } else {
-                                               hif_drv->usr_scan_req.net_info[i].rssi = pstrNetworkInfo->rssi;
-                                               bNewNtwrkFound = false;
-                                               break;
-                                       }
+                       if (memcmp(hif_drv->usr_scan_req.net_info[i].bssid,
+                                  pstrNetworkInfo->bssid, 6) == 0) {
+                               if (pstrNetworkInfo->rssi <= hif_drv->usr_scan_req.net_info[i].rssi) {
+                                       goto done;
+                               } else {
+                                       hif_drv->usr_scan_req.net_info[i].rssi = pstrNetworkInfo->rssi;
+                                       bNewNtwrkFound = false;
+                                       break;
                                }
                        }
                }
@@ -1250,20 +1253,17 @@ static s32 Handle_RcvdNtwrkInfo(struct wilc_vif *vif,
                        if (hif_drv->usr_scan_req.rcvd_ch_cnt < MAX_NUM_SCANNED_NETWORKS) {
                                hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].rssi = pstrNetworkInfo->rssi;
 
-                               if (hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid &&
-                                   pstrNetworkInfo->bssid) {
-                                       memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid,
-                                              pstrNetworkInfo->bssid, 6);
+                               memcpy(hif_drv->usr_scan_req.net_info[hif_drv->usr_scan_req.rcvd_ch_cnt].bssid,
+                                      pstrNetworkInfo->bssid, 6);
 
-                                       hif_drv->usr_scan_req.rcvd_ch_cnt++;
+                               hif_drv->usr_scan_req.rcvd_ch_cnt++;
 
-                                       pstrNetworkInfo->new_network = true;
-                                       pJoinParams = host_int_ParseJoinBssParam(pstrNetworkInfo);
+                               pstrNetworkInfo->new_network = true;
+                               pJoinParams = host_int_ParseJoinBssParam(pstrNetworkInfo);
 
-                                       hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo,
-                                                                         hif_drv->usr_scan_req.arg,
-                                                                         pJoinParams);
-                               }
+                               hif_drv->usr_scan_req.scan_result(SCAN_EVENT_NETWORK_FOUND, pstrNetworkInfo,
+                                                                 hif_drv->usr_scan_req.arg,
+                                                                 pJoinParams);
                        }
                } else {
                        pstrNetworkInfo->new_network = false;
@@ -2364,7 +2364,7 @@ static void ListenTimerCB(unsigned long arg)
        msg.vif = vif;
        msg.body.remain_on_ch.id = vif->hif_drv->remain_on_ch.id;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
 }
@@ -2464,187 +2464,171 @@ static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
        complete(&hif_wait_response);
 }
 
-static int hostIFthread(void *pvArg)
+static void host_if_work(struct work_struct *work)
 {
-       u32 u32Ret;
-       struct host_if_msg msg;
-       struct wilc *wilc = pvArg;
-       struct wilc_vif *vif;
-
-       memset(&msg, 0, sizeof(struct host_if_msg));
-
-       while (1) {
-               wilc_mq_recv(&hif_msg_q, &msg, sizeof(struct host_if_msg), &u32Ret);
-               vif = msg.vif;
-               if (msg.id == HOST_IF_MSG_EXIT)
-                       break;
-
-               if ((!wilc_initialized)) {
-                       usleep_range(200 * 1000, 200 * 1000);
-                       wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
-                       continue;
-               }
-
-               if (msg.id == HOST_IF_MSG_CONNECT &&
-                   vif->hif_drv->usr_scan_req.scan_result) {
-                       wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
-                       usleep_range(2 * 1000, 2 * 1000);
-                       continue;
-               }
+       struct host_if_msg *msg;
+       struct wilc *wilc;
 
-               switch (msg.id) {
-               case HOST_IF_MSG_SCAN:
-                       handle_scan(msg.vif, &msg.body.scan_info);
-                       break;
+       msg = container_of(work, struct host_if_msg, work);
+       wilc = msg->vif->wilc;
 
-               case HOST_IF_MSG_CONNECT:
-                       Handle_Connect(msg.vif, &msg.body.con_info);
-                       break;
+       if (msg->id == HOST_IF_MSG_CONNECT &&
+           msg->vif->hif_drv->usr_scan_req.scan_result) {
+               wilc_enqueue_cmd(msg);
+               usleep_range(2 * 1000, 2 * 1000);
+               goto free_msg;
+       }
+       switch (msg->id) {
+       case HOST_IF_MSG_SCAN:
+               handle_scan(msg->vif, &msg->body.scan_info);
+               break;
 
-               case HOST_IF_MSG_RCVD_NTWRK_INFO:
-                       Handle_RcvdNtwrkInfo(msg.vif, &msg.body.net_info);
-                       break;
+       case HOST_IF_MSG_CONNECT:
+               Handle_Connect(msg->vif, &msg->body.con_info);
+               break;
 
-               case HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO:
-                       Handle_RcvdGnrlAsyncInfo(vif,
-                                                &msg.body.async_info);
-                       break;
+       case HOST_IF_MSG_RCVD_NTWRK_INFO:
+               Handle_RcvdNtwrkInfo(msg->vif, &msg->body.net_info);
+               break;
 
-               case HOST_IF_MSG_KEY:
-                       Handle_Key(msg.vif, &msg.body.key_info);
-                       break;
+       case HOST_IF_MSG_RCVD_GNRL_ASYNC_INFO:
+               Handle_RcvdGnrlAsyncInfo(msg->vif,
+                                        &msg->body.async_info);
+               break;
 
-               case HOST_IF_MSG_CFG_PARAMS:
-                       handle_cfg_param(msg.vif, &msg.body.cfg_info);
-                       break;
+       case HOST_IF_MSG_KEY:
+               Handle_Key(msg->vif, &msg->body.key_info);
+               break;
 
-               case HOST_IF_MSG_SET_CHANNEL:
-                       handle_set_channel(msg.vif, &msg.body.channel_info);
-                       break;
+       case HOST_IF_MSG_CFG_PARAMS:
+               handle_cfg_param(msg->vif, &msg->body.cfg_info);
+               break;
 
-               case HOST_IF_MSG_DISCONNECT:
-                       Handle_Disconnect(msg.vif);
-                       break;
+       case HOST_IF_MSG_SET_CHANNEL:
+               handle_set_channel(msg->vif, &msg->body.channel_info);
+               break;
 
-               case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
-                       del_timer(&vif->hif_drv->scan_timer);
+       case HOST_IF_MSG_DISCONNECT:
+               Handle_Disconnect(msg->vif);
+               break;
 
-                       if (!wilc_wlan_get_num_conn_ifcs(wilc))
-                               wilc_chip_sleep_manually(wilc);
+       case HOST_IF_MSG_RCVD_SCAN_COMPLETE:
+               del_timer(&msg->vif->hif_drv->scan_timer);
 
-                       Handle_ScanDone(msg.vif, SCAN_EVENT_DONE);
+               if (!wilc_wlan_get_num_conn_ifcs(wilc))
+                       wilc_chip_sleep_manually(wilc);
 
-                       if (vif->hif_drv->remain_on_ch_pending)
-                               Handle_RemainOnChan(msg.vif,
-                                                   &msg.body.remain_on_ch);
+               Handle_ScanDone(msg->vif, SCAN_EVENT_DONE);
 
-                       break;
+               if (msg->vif->hif_drv->remain_on_ch_pending)
+                       Handle_RemainOnChan(msg->vif,
+                                           &msg->body.remain_on_ch);
 
-               case HOST_IF_MSG_GET_RSSI:
-                       Handle_GetRssi(msg.vif);
-                       break;
+               break;
 
-               case HOST_IF_MSG_GET_STATISTICS:
-                       Handle_GetStatistics(msg.vif,
-                                            (struct rf_info *)msg.body.data);
-                       break;
+       case HOST_IF_MSG_GET_RSSI:
+               Handle_GetRssi(msg->vif);
+               break;
 
-               case HOST_IF_MSG_ADD_BEACON:
-                       Handle_AddBeacon(msg.vif, &msg.body.beacon_info);
-                       break;
+       case HOST_IF_MSG_GET_STATISTICS:
+               Handle_GetStatistics(msg->vif,
+                                    (struct rf_info *)msg->body.data);
+               break;
 
-               case HOST_IF_MSG_DEL_BEACON:
-                       Handle_DelBeacon(msg.vif);
-                       break;
+       case HOST_IF_MSG_ADD_BEACON:
+               Handle_AddBeacon(msg->vif, &msg->body.beacon_info);
+               break;
 
-               case HOST_IF_MSG_ADD_STATION:
-                       Handle_AddStation(msg.vif, &msg.body.add_sta_info);
-                       break;
+       case HOST_IF_MSG_DEL_BEACON:
+               Handle_DelBeacon(msg->vif);
+               break;
 
-               case HOST_IF_MSG_DEL_STATION:
-                       Handle_DelStation(msg.vif, &msg.body.del_sta_info);
-                       break;
+       case HOST_IF_MSG_ADD_STATION:
+               Handle_AddStation(msg->vif, &msg->body.add_sta_info);
+               break;
 
-               case HOST_IF_MSG_EDIT_STATION:
-                       Handle_EditStation(msg.vif, &msg.body.edit_sta_info);
-                       break;
+       case HOST_IF_MSG_DEL_STATION:
+               Handle_DelStation(msg->vif, &msg->body.del_sta_info);
+               break;
 
-               case HOST_IF_MSG_GET_INACTIVETIME:
-                       Handle_Get_InActiveTime(msg.vif, &msg.body.mac_info);
-                       break;
+       case HOST_IF_MSG_EDIT_STATION:
+               Handle_EditStation(msg->vif, &msg->body.edit_sta_info);
+               break;
 
-               case HOST_IF_MSG_SCAN_TIMER_FIRED:
+       case HOST_IF_MSG_GET_INACTIVETIME:
+               Handle_Get_InActiveTime(msg->vif, &msg->body.mac_info);
+               break;
 
-                       Handle_ScanDone(msg.vif, SCAN_EVENT_ABORTED);
-                       break;
+       case HOST_IF_MSG_SCAN_TIMER_FIRED:
+               Handle_ScanDone(msg->vif, SCAN_EVENT_ABORTED);
+               break;
 
-               case HOST_IF_MSG_CONNECT_TIMER_FIRED:
-                       Handle_ConnectTimeout(msg.vif);
-                       break;
+       case HOST_IF_MSG_CONNECT_TIMER_FIRED:
+               Handle_ConnectTimeout(msg->vif);
+               break;
 
-               case HOST_IF_MSG_POWER_MGMT:
-                       Handle_PowerManagement(msg.vif,
-                                              &msg.body.pwr_mgmt_info);
-                       break;
+       case HOST_IF_MSG_POWER_MGMT:
+               Handle_PowerManagement(msg->vif,
+                                      &msg->body.pwr_mgmt_info);
+               break;
 
-               case HOST_IF_MSG_SET_WFIDRV_HANDLER:
-                       handle_set_wfi_drv_handler(msg.vif, &msg.body.drv);
-                       break;
+       case HOST_IF_MSG_SET_WFIDRV_HANDLER:
+               handle_set_wfi_drv_handler(msg->vif, &msg->body.drv);
+               break;
 
-               case HOST_IF_MSG_SET_OPERATION_MODE:
-                       handle_set_operation_mode(msg.vif, &msg.body.mode);
-                       break;
+       case HOST_IF_MSG_SET_OPERATION_MODE:
+               handle_set_operation_mode(msg->vif, &msg->body.mode);
+               break;
 
-               case HOST_IF_MSG_SET_IPADDRESS:
-                       handle_set_ip_address(vif,
-                                             msg.body.ip_info.ip_addr,
-                                             msg.body.ip_info.idx);
-                       break;
+       case HOST_IF_MSG_SET_IPADDRESS:
+               handle_set_ip_address(msg->vif,
+                                     msg->body.ip_info.ip_addr,
+                                     msg->body.ip_info.idx);
+               break;
 
-               case HOST_IF_MSG_GET_IPADDRESS:
-                       handle_get_ip_address(vif, msg.body.ip_info.idx);
-                       break;
+       case HOST_IF_MSG_GET_IPADDRESS:
+               handle_get_ip_address(msg->vif, msg->body.ip_info.idx);
+               break;
 
-               case HOST_IF_MSG_GET_MAC_ADDRESS:
-                       handle_get_mac_address(msg.vif,
-                                              &msg.body.get_mac_info);
-                       break;
+       case HOST_IF_MSG_GET_MAC_ADDRESS:
+               handle_get_mac_address(msg->vif,
+                                      &msg->body.get_mac_info);
+               break;
 
-               case HOST_IF_MSG_REMAIN_ON_CHAN:
-                       Handle_RemainOnChan(msg.vif, &msg.body.remain_on_ch);
-                       break;
+       case HOST_IF_MSG_REMAIN_ON_CHAN:
+               Handle_RemainOnChan(msg->vif, &msg->body.remain_on_ch);
+               break;
 
-               case HOST_IF_MSG_REGISTER_FRAME:
-                       Handle_RegisterFrame(msg.vif, &msg.body.reg_frame);
-                       break;
+       case HOST_IF_MSG_REGISTER_FRAME:
+               Handle_RegisterFrame(msg->vif, &msg->body.reg_frame);
+               break;
 
-               case HOST_IF_MSG_LISTEN_TIMER_FIRED:
-                       Handle_ListenStateExpired(msg.vif, &msg.body.remain_on_ch);
-                       break;
+       case HOST_IF_MSG_LISTEN_TIMER_FIRED:
+               Handle_ListenStateExpired(msg->vif, &msg->body.remain_on_ch);
+               break;
 
-               case HOST_IF_MSG_SET_MULTICAST_FILTER:
-                       Handle_SetMulticastFilter(msg.vif, &msg.body.multicast_info);
-                       break;
+       case HOST_IF_MSG_SET_MULTICAST_FILTER:
+               Handle_SetMulticastFilter(msg->vif, &msg->body.multicast_info);
+               break;
 
-               case HOST_IF_MSG_DEL_ALL_STA:
-                       Handle_DelAllSta(msg.vif, &msg.body.del_all_sta_info);
-                       break;
+       case HOST_IF_MSG_DEL_ALL_STA:
+               Handle_DelAllSta(msg->vif, &msg->body.del_all_sta_info);
+               break;
 
-               case HOST_IF_MSG_SET_TX_POWER:
-                       handle_set_tx_pwr(msg.vif, msg.body.tx_power.tx_pwr);
-                       break;
+       case HOST_IF_MSG_SET_TX_POWER:
+               handle_set_tx_pwr(msg->vif, msg->body.tx_power.tx_pwr);
+               break;
 
-               case HOST_IF_MSG_GET_TX_POWER:
-                       handle_get_tx_pwr(msg.vif, &msg.body.tx_power.tx_pwr);
-                       break;
-               default:
-                       netdev_err(vif->ndev, "[Host Interface] undefined\n");
-                       break;
-               }
+       case HOST_IF_MSG_GET_TX_POWER:
+               handle_get_tx_pwr(msg->vif, &msg->body.tx_power.tx_pwr);
+               break;
+       default:
+               netdev_err(msg->vif->ndev, "[Host Interface] undefined\n");
+               break;
        }
-
+free_msg:
+       kfree(msg);
        complete(&hif_thread_comp);
-       return 0;
 }
 
 static void TimerCB_Scan(unsigned long arg)
@@ -2656,7 +2640,7 @@ static void TimerCB_Scan(unsigned long arg)
        msg.vif = vif;
        msg.id = HOST_IF_MSG_SCAN_TIMER_FIRED;
 
-       wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       wilc_enqueue_cmd(&msg);
 }
 
 static void TimerCB_Connect(unsigned long arg)
@@ -2668,7 +2652,7 @@ static void TimerCB_Connect(unsigned long arg)
        msg.vif = vif;
        msg.id = HOST_IF_MSG_CONNECT_TIMER_FIRED;
 
-       wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       wilc_enqueue_cmd(&msg);
 }
 
 s32 wilc_remove_key(struct host_if_drv *hif_drv, const u8 *pu8StaAddress)
@@ -2703,7 +2687,7 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
        msg.vif = vif;
        msg.body.key_info.attr.wep.index = index;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "Request to remove WEP key\n");
        else
@@ -2732,7 +2716,7 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
        msg.vif = vif;
        msg.body.key_info.attr.wep.index = index;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "Default key index\n");
        else
@@ -2766,7 +2750,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
        msg.body.key_info.attr.wep.key_len = len;
        msg.body.key_info.attr.wep.index = index;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "STA - WEP Key\n");
        wait_for_completion(&hif_drv->comp_test_key_block);
@@ -2801,7 +2785,7 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
        msg.body.key_info.attr.wep.mode = mode;
        msg.body.key_info.attr.wep.auth_type = auth_type;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
 
        if (result)
                netdev_err(vif->ndev, "AP - WEP Key\n");
@@ -2857,7 +2841,7 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
        msg.body.key_info.attr.wpa.mode = cipher_mode;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
 
        if (result)
                netdev_err(vif->ndev, "PTK Key\n");
@@ -2926,7 +2910,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
        msg.body.key_info.attr.wpa.key_len = key_len;
        msg.body.key_info.attr.wpa.seq_len = key_rsc_len;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "RX GTK\n");
        else
@@ -2956,7 +2940,7 @@ int wilc_set_pmkid_info(struct wilc_vif *vif,
                       &pmkid->pmkidlist[i].pmkid, PMKID_LEN);
        }
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "PMKID Info\n");
 
@@ -2974,7 +2958,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
        msg.body.get_mac_info.mac_addr = mac_addr;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result) {
                netdev_err(vif->ndev, "Failed to send get mac address\n");
                return -EFAULT;
@@ -3038,7 +3022,7 @@ int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ssid,
        if (hif_drv->hif_state < HOST_IF_CONNECTING)
                hif_drv->hif_state = HOST_IF_CONNECTING;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result) {
                netdev_err(vif->ndev, "send message: Set join request\n");
                return -EFAULT;
@@ -3067,7 +3051,7 @@ int wilc_disconnect(struct wilc_vif *vif, u16 reason_code)
        msg.id = HOST_IF_MSG_DISCONNECT;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "Failed to send message: disconnect\n");
        else
@@ -3111,7 +3095,7 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
        msg.body.channel_info.set_ch = channel;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result) {
                netdev_err(vif->ndev, "wilc mq send fail\n");
                return -EINVAL;
@@ -3131,7 +3115,7 @@ int wilc_set_wfi_drv_handler(struct wilc_vif *vif, int index, u8 mac_idx)
        msg.body.drv.mac_idx = mac_idx;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result) {
                netdev_err(vif->ndev, "wilc mq send fail\n");
                result = -EINVAL;
@@ -3150,7 +3134,7 @@ int wilc_set_operation_mode(struct wilc_vif *vif, u32 mode)
        msg.body.mode.mode = mode;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result) {
                netdev_err(vif->ndev, "wilc mq send fail\n");
                result = -EINVAL;
@@ -3177,7 +3161,7 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
        msg.id = HOST_IF_MSG_GET_INACTIVETIME;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "Failed to send get host ch param\n");
        else
@@ -3198,7 +3182,7 @@ int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
        msg.id = HOST_IF_MSG_GET_RSSI;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result) {
                netdev_err(vif->ndev, "Failed to send get host ch param\n");
                return -EFAULT;
@@ -3226,7 +3210,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
        msg.body.data = (char *)stats;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result) {
                netdev_err(vif->ndev, "Failed to send get host channel\n");
                return -EFAULT;
@@ -3279,7 +3263,7 @@ int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type,
        if (!scan_info->ies)
                return -ENOMEM;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result) {
                netdev_err(vif->ndev, "Error in sending message queue\n");
                return -EINVAL;
@@ -3309,7 +3293,7 @@ int wilc_hif_set_cfg(struct wilc_vif *vif,
        msg.body.cfg_info = *cfg_param;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
 
        return result;
 }
@@ -3371,21 +3355,17 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
        init_completion(&hif_drv->comp_inactive_time);
 
        if (clients_count == 0) {
-               result = wilc_mq_create(&hif_msg_q);
-
                if (result < 0) {
                        netdev_err(vif->ndev, "Failed to creat MQ\n");
                        goto _fail_;
                }
-
-               hif_thread_handler = kthread_run(hostIFthread, wilc,
-                                                "WILC_kthread");
-
-               if (IS_ERR(hif_thread_handler)) {
-                       netdev_err(vif->ndev, "Failed to creat Thread\n");
-                       result = -EFAULT;
+               hif_workqueue = create_singlethread_workqueue("WILC_wq");
+               if (!hif_workqueue) {
+                       netdev_err(vif->ndev, "Failed to create workqueue\n");
+                       result = -ENOMEM;
                        goto _fail_mq_;
                }
+
                setup_timer(&periodic_rssi, GetPeriodicRSSI,
                            (unsigned long)vif);
                mod_timer(&periodic_rssi, jiffies + msecs_to_jiffies(5000));
@@ -3411,10 +3391,8 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
 
        clients_count++;
 
-       return result;
-
 _fail_mq_:
-       wilc_mq_destroy(&hif_msg_q);
+       destroy_workqueue(hif_workqueue);
 _fail_:
        return result;
 }
@@ -3458,13 +3436,13 @@ int wilc_deinit(struct wilc_vif *vif)
                msg.id = HOST_IF_MSG_EXIT;
                msg.vif = vif;
 
-               result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+               result = wilc_enqueue_cmd(&msg);
                if (result != 0)
                        netdev_err(vif->ndev, "deinit : Error(%d)\n", result);
                else
                        wait_for_completion(&hif_thread_comp);
 
-               wilc_mq_destroy(&hif_msg_q);
+               destroy_workqueue(hif_workqueue);
        }
 
        kfree(hif_drv);
@@ -3504,7 +3482,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *pu8Buffer,
        msg.body.net_info.buffer = kmalloc(u32Length, GFP_KERNEL);
        memcpy(msg.body.net_info.buffer, pu8Buffer, u32Length);
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "message parameters (%d)\n", result);
 }
@@ -3549,7 +3527,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
        msg.body.async_info.buffer = kmalloc(u32Length, GFP_KERNEL);
        memcpy(msg.body.async_info.buffer, pu8Buffer, u32Length);
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "synchronous info (%d)\n", result);
 
@@ -3580,7 +3558,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
                msg.id = HOST_IF_MSG_RCVD_SCAN_COMPLETE;
                msg.vif = vif;
 
-               result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+               result = wilc_enqueue_cmd(&msg);
                if (result)
                        netdev_err(vif->ndev, "complete param (%d)\n", result);
        }
@@ -3606,7 +3584,7 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
        msg.body.remain_on_ch.id = session_id;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc mq send fail\n");
 
@@ -3631,7 +3609,7 @@ int wilc_listen_state_expired(struct wilc_vif *vif, u32 session_id)
        msg.vif = vif;
        msg.body.remain_on_ch.id = session_id;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc mq send fail\n");
 
@@ -3662,7 +3640,7 @@ int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
        msg.body.reg_frame.reg = reg;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc mq send fail\n");
 
@@ -3700,7 +3678,7 @@ int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
                beacon_info->tail = NULL;
        }
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc mq send fail\n");
 
@@ -3722,7 +3700,7 @@ int wilc_del_beacon(struct wilc_vif *vif)
        msg.id = HOST_IF_MSG_DEL_BEACON;
        msg.vif = vif;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
 
@@ -3749,7 +3727,7 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
                        return -ENOMEM;
        }
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
        return result;
@@ -3771,7 +3749,7 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr)
        else
                memcpy(del_sta_info->mac_addr, mac_addr, ETH_ALEN);
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
        return result;
@@ -3801,7 +3779,7 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
                return result;
 
        del_all_sta_info->assoc_sta = assoc_sta;
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
 
        if (result)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
@@ -3832,7 +3810,7 @@ int wilc_edit_station(struct wilc_vif *vif,
                        return -ENOMEM;
        }
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
 
@@ -3856,7 +3834,7 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
        pwr_mgmt_info->enabled = enabled;
        pwr_mgmt_info->timeout = timeout;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
        return result;
@@ -3877,7 +3855,7 @@ int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
        multicast_filter_param->enabled = enabled;
        multicast_filter_param->cnt = count;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
        return result;
@@ -4050,7 +4028,7 @@ int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
        msg.vif = vif;
        msg.body.ip_info.idx = idx;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
 
@@ -4070,7 +4048,7 @@ static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
        msg.vif = vif;
        msg.body.ip_info.idx = idx;
 
-       result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       result = wilc_enqueue_cmd(&msg);
        if (result)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
 
@@ -4088,7 +4066,7 @@ int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power)
        msg.body.tx_power.tx_pwr = tx_power;
        msg.vif = vif;
 
-       ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       ret = wilc_enqueue_cmd(&msg);
        if (ret)
                netdev_err(vif->ndev, "wilc_mq_send fail\n");
 
@@ -4105,7 +4083,7 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
        msg.id = HOST_IF_MSG_GET_TX_POWER;
        msg.vif = vif;
 
-       ret = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
+       ret = wilc_enqueue_cmd(&msg);
        if (ret)
                netdev_err(vif->ndev, "Failed to get TX PWR\n");
 
index 8d2dd0db0bedfb87d7cb9ffa1e6e41f116cbb082..ddfea29df2a72ab667c04726c2e68c2c9abd9049 100644 (file)
@@ -224,10 +224,6 @@ struct op_mode {
        u32 mode;
 };
 
-struct set_mac_addr {
-       u8 mac_addr[ETH_ALEN];
-};
-
 struct get_mac_addr {
        u8 *mac_addr;
 };
index 4f93c11e73c02d61e027ac7377c6cf66d12aee6a..3a66255f14fc77a7376eedee063452c0b45c06be 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
-
+#include <linux/mutex.h>
 #include <linux/semaphore.h>
 #include <linux/completion.h>
 
@@ -31,8 +31,6 @@ static struct notifier_block g_dev_notifier = {
        .notifier_call = dev_state_ev_handler
 };
 
-static struct semaphore close_exit_sync;
-
 static int wlan_deinit_locks(struct net_device *dev);
 static void wlan_deinitialize_threads(struct net_device *dev);
 
@@ -241,7 +239,7 @@ void wilc_mac_indicate(struct wilc *wilc, int flag)
                                      (unsigned char *)&status, 4);
                if (wilc->mac_status == WILC_MAC_STATUS_INIT) {
                        wilc->mac_status = status;
-                       up(&wilc->sync_event);
+                       complete(&wilc->sync_event);
                } else {
                        wilc->mac_status = status;
                }
@@ -316,7 +314,7 @@ static int linux_wlan_txq_task(void *vp)
 
        complete(&wl->txq_thread_started);
        while (1) {
-               down(&wl->txq_event);
+               wait_for_completion(&wl->txq_event);
 
                if (wl->close) {
                        complete(&wl->txq_thread_started);
@@ -362,7 +360,7 @@ int wilc_wlan_get_firmware(struct net_device *dev)
                goto _fail_;
 
        if (request_firmware(&wilc_firmware, firmware, wilc->dev) != 0) {
-               netdev_err(dev, "%s - firmare not available\n", firmware);
+               netdev_err(dev, "%s - firmware not available\n", firmware);
                ret = -1;
                goto _fail_;
        }
@@ -386,9 +384,9 @@ static int linux_wlan_start_firmware(struct net_device *dev)
        if (ret < 0)
                return ret;
 
-       ret = wilc_lock_timeout(wilc, &wilc->sync_event, 5000);
-       if (ret)
-               return ret;
+       if (!wait_for_completion_timeout(&wilc->sync_event,
+                                       msecs_to_jiffies(5000)))
+               return -ETIME;
 
        return 0;
 }
@@ -650,7 +648,7 @@ void wilc1000_wlan_deinit(struct net_device *dev)
                        mutex_unlock(&wl->hif_cs);
                }
                if (&wl->txq_event)
-                       up(&wl->txq_event);
+                       wait_for_completion(&wl->txq_event);
 
                wlan_deinitialize_threads(dev);
                deinit_irq(dev);
@@ -679,12 +677,12 @@ static int wlan_init_locks(struct net_device *dev)
        mutex_init(&wl->rxq_cs);
 
        spin_lock_init(&wl->txq_spinlock);
-       sema_init(&wl->txq_add_to_head_cs, 1);
+       mutex_init(&wl->txq_add_to_head_cs);
 
-       sema_init(&wl->txq_event, 0);
+       init_completion(&wl->txq_event);
 
-       sema_init(&wl->cfg_event, 0);
-       sema_init(&wl->sync_event, 0);
+       init_completion(&wl->cfg_event);
+       init_completion(&wl->sync_event);
        init_completion(&wl->txq_thread_started);
 
        return 0;
@@ -717,10 +715,10 @@ static int wlan_initialize_threads(struct net_device *dev)
 
        wilc->txq_thread = kthread_run(linux_wlan_txq_task, (void *)dev,
                                     "K_TXQ_TASK");
-       if (!wilc->txq_thread) {
+       if (IS_ERR(wilc->txq_thread)) {
                netdev_err(dev, "couldn't create TXQ thread\n");
                wilc->close = 0;
-               return -ENOBUFS;
+               return PTR_ERR(wilc->txq_thread);
        }
        wait_for_completion(&wilc->txq_thread_started);
 
@@ -738,7 +736,7 @@ static void wlan_deinitialize_threads(struct net_device *dev)
        wl->close = 1;
 
        if (&wl->txq_event)
-               up(&wl->txq_event);
+               complete(&wl->txq_event);
 
        if (wl->txq_thread) {
                kthread_stop(wl->txq_thread);
@@ -1088,7 +1086,6 @@ int wilc_mac_close(struct net_device *ndev)
                WILC_WFI_deinit_mon_interface();
        }
 
-       up(&close_exit_sync);
        vif->mac_opened = 0;
 
        return 0;
@@ -1232,8 +1229,6 @@ void wilc_netdev_cleanup(struct wilc *wilc)
        }
 
        if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
-               wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000);
-
                for (i = 0; i < NUM_CONCURRENT_IFC; i++)
                        if (wilc->vif[i]->ndev)
                                if (vif[i]->mac_opened)
@@ -1258,8 +1253,6 @@ int wilc_netdev_init(struct wilc **wilc, struct device *dev, int io_type,
        struct net_device *ndev;
        struct wilc *wl;
 
-       sema_init(&close_exit_sync, 0);
-
        wl = kzalloc(sizeof(*wl), GFP_KERNEL);
        if (!wl)
                return -ENOMEM;
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.c b/drivers/staging/wilc1000/wilc_msgqueue.c
deleted file mode 100644 (file)
index 6cb894e..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-
-#include "wilc_msgqueue.h"
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-/*!
- *  @author            syounan
- *  @date              1 Sep 2010
- *  @note              copied from FLO glue implementatuion
- *  @version           1.0
- */
-int wilc_mq_create(struct message_queue *mq)
-{
-       spin_lock_init(&mq->lock);
-       sema_init(&mq->sem, 0);
-       INIT_LIST_HEAD(&mq->msg_list);
-       mq->recv_count = 0;
-       mq->exiting = false;
-       return 0;
-}
-
-/*!
- *  @author            syounan
- *  @date              1 Sep 2010
- *  @note              copied from FLO glue implementatuion
- *  @version           1.0
- */
-int wilc_mq_destroy(struct message_queue *mq)
-{
-       struct message *msg;
-
-       mq->exiting = true;
-
-       /* Release any waiting receiver thread. */
-       while (mq->recv_count > 0) {
-               up(&mq->sem);
-               mq->recv_count--;
-       }
-
-       while (!list_empty(&mq->msg_list)) {
-               msg = list_first_entry(&mq->msg_list, struct message, list);
-               list_del(&msg->list);
-               kfree(msg->buf);
-       }
-
-       return 0;
-}
-
-/*!
- *  @author            syounan
- *  @date              1 Sep 2010
- *  @note              copied from FLO glue implementatuion
- *  @version           1.0
- */
-int wilc_mq_send(struct message_queue *mq,
-                const void *send_buf, u32 send_buf_size)
-{
-       unsigned long flags;
-       struct message *new_msg = NULL;
-
-       if (!mq || (send_buf_size == 0) || !send_buf)
-               return -EINVAL;
-
-       if (mq->exiting)
-               return -EFAULT;
-
-       /* construct a new message */
-       new_msg = kmalloc(sizeof(*new_msg), GFP_ATOMIC);
-       if (!new_msg)
-               return -ENOMEM;
-
-       new_msg->len = send_buf_size;
-       INIT_LIST_HEAD(&new_msg->list);
-       new_msg->buf = kmemdup(send_buf, send_buf_size, GFP_ATOMIC);
-       if (!new_msg->buf) {
-               kfree(new_msg);
-               return -ENOMEM;
-       }
-
-       spin_lock_irqsave(&mq->lock, flags);
-
-       /* add it to the message queue */
-       list_add_tail(&new_msg->list, &mq->msg_list);
-
-       spin_unlock_irqrestore(&mq->lock, flags);
-
-       up(&mq->sem);
-
-       return 0;
-}
-
-/*!
- *  @author            syounan
- *  @date              1 Sep 2010
- *  @note              copied from FLO glue implementatuion
- *  @version           1.0
- */
-int wilc_mq_recv(struct message_queue *mq,
-                void *recv_buf, u32 recv_buf_size, u32 *recv_len)
-{
-       struct message *msg;
-       unsigned long flags;
-
-       if (!mq || (recv_buf_size == 0) || !recv_buf || !recv_len)
-               return -EINVAL;
-
-       if (mq->exiting)
-               return -EFAULT;
-
-       spin_lock_irqsave(&mq->lock, flags);
-       mq->recv_count++;
-       spin_unlock_irqrestore(&mq->lock, flags);
-
-       down(&mq->sem);
-       spin_lock_irqsave(&mq->lock, flags);
-
-       if (list_empty(&mq->msg_list)) {
-               spin_unlock_irqrestore(&mq->lock, flags);
-               up(&mq->sem);
-               return -EFAULT;
-       }
-       /* check buffer size */
-       msg = list_first_entry(&mq->msg_list, struct message, list);
-       if (recv_buf_size < msg->len) {
-               spin_unlock_irqrestore(&mq->lock, flags);
-               up(&mq->sem);
-               return -EOVERFLOW;
-       }
-
-       /* consume the message */
-       mq->recv_count--;
-       memcpy(recv_buf, msg->buf, msg->len);
-       *recv_len = msg->len;
-
-       list_del(&msg->list);
-
-       kfree(msg->buf);
-       kfree(msg);
-
-       spin_unlock_irqrestore(&mq->lock, flags);
-
-       return 0;
-}
diff --git a/drivers/staging/wilc1000/wilc_msgqueue.h b/drivers/staging/wilc1000/wilc_msgqueue.h
deleted file mode 100644 (file)
index 846a484..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef __WILC_MSG_QUEUE_H__
-#define __WILC_MSG_QUEUE_H__
-
-#include <linux/semaphore.h>
-#include <linux/list.h>
-
-struct message {
-       void *buf;
-       u32 len;
-       struct list_head list;
-};
-
-struct message_queue {
-       struct semaphore sem;
-       spinlock_t lock;
-       bool exiting;
-       u32 recv_count;
-       struct list_head msg_list;
-};
-
-int wilc_mq_create(struct message_queue *mq);
-int wilc_mq_send(struct message_queue *mq,
-                const void *send_buf, u32 send_buf_size);
-int wilc_mq_recv(struct message_queue *mq,
-                void *recv_buf, u32 recv_buf_size, u32 *recv_len);
-int wilc_mq_destroy(struct message_queue *mq);
-
-#endif
index a839a7967dd83053f0891f9060d0df6b5df4dcd6..39b73fb27398b0599dd0ee14066b3b41c3181226 100644 (file)
@@ -1006,7 +1006,7 @@ static int sdio_sync_ext(struct wilc *wilc, int nint)
        u32 reg;
 
        if (nint > MAX_NUM_INT) {
-               dev_err(&func->dev, "Too many interupts (%d)...\n", nint);
+               dev_err(&func->dev, "Too many interrupts (%d)...\n", nint);
                return 0;
        }
        if (nint > MAX_NUN_INT_THRPT_ENH2) {
index 4268e2f29307ae89c8133a505cdd6603541f17d2..22cf4b7857e5317b34a940d8b100591984ea246c 100644 (file)
@@ -1082,7 +1082,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
        int ret, i;
 
        if (nint > MAX_NUM_INT) {
-               dev_err(&spi->dev, "Too many interupts (%d)...\n", nint);
+               dev_err(&spi->dev, "Too many interrupts (%d)...\n", nint);
                return 0;
        }
 
index 51aff4ff7d7c37aafca0b08adb1e9375287b6072..3ddfa4aecb7a9383b9bd0f44a1c5abf51b0b5a5f 100644 (file)
@@ -625,8 +625,7 @@ static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request)
 
 
                        for (i = 0; i < request->n_ssids; i++) {
-                               if (request->ssids[i].ssid &&
-                                   request->ssids[i].ssid_len != 0) {
+                               if (request->ssids[i].ssid_len != 0) {
                                        strHiddenNetwork.net_info[i].ssid = kmalloc(request->ssids[i].ssid_len, GFP_KERNEL);
                                        memcpy(strHiddenNetwork.net_info[i].ssid, request->ssids[i].ssid, request->ssids[i].ssid_len);
                                        strHiddenNetwork.net_info[i].ssid_len = request->ssids[i].ssid_len;
index 3a561df6d370b896a06b689de801ee7bba78ec01..5cc6a82d80818f7c725f5631272a4eb3db510752 100644 (file)
@@ -42,6 +42,8 @@
 #include "host_interface.h"
 #include "wilc_wlan.h"
 #include <linux/wireless.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
 
 #define FLOW_CONTROL_LOWER_THRESHOLD   128
 #define FLOW_CONTROL_UPPER_THRESHOLD   256
@@ -170,15 +172,15 @@ struct wilc {
        struct wilc_vif *vif[NUM_CONCURRENT_IFC];
        u8 open_ifcs;
 
-       struct semaphore txq_add_to_head_cs;
+       struct mutex txq_add_to_head_cs;
        spinlock_t txq_spinlock;
 
        struct mutex rxq_cs;
        struct mutex hif_cs;
 
-       struct semaphore cfg_event;
-       struct semaphore sync_event;
-       struct semaphore txq_event;
+       struct completion cfg_event;
+       struct completion sync_event;
+       struct completion txq_event;
        struct completion txq_thread_started;
 
        struct task_struct *txq_thread;
index 11e16d56ace701248bbd709d4ff0e2b87446b3d2..19a580939dfc09d0f122a3fe8515f4d172542176 100644 (file)
@@ -1,3 +1,4 @@
+#include <linux/completion.h>
 #include "wilc_wlan_if.h"
 #include "wilc_wlan.h"
 #include "wilc_wfi_netdevice.h"
@@ -89,7 +90,7 @@ static void wilc_wlan_txq_add_to_tail(struct net_device *dev,
 
        spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
 
-       up(&wilc->txq_event);
+       complete(&wilc->txq_event);
 }
 
 static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
@@ -98,9 +99,7 @@ static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
        unsigned long flags;
        struct wilc *wilc = vif->wilc;
 
-       if (wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs,
-                                   CFG_PKTS_TIMEOUT))
-               return -1;
+       mutex_lock(&wilc->txq_add_to_head_cs);
 
        spin_lock_irqsave(&wilc->txq_spinlock, flags);
 
@@ -118,8 +117,8 @@ static int wilc_wlan_txq_add_to_head(struct wilc_vif *vif,
        wilc->txq_entries += 1;
 
        spin_unlock_irqrestore(&wilc->txq_spinlock, flags);
-       up(&wilc->txq_add_to_head_cs);
-       up(&wilc->txq_event);
+       mutex_unlock(&wilc->txq_add_to_head_cs);
+       complete(&wilc->txq_event);
 
        return 0;
 }
@@ -287,7 +286,8 @@ static int wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev)
        spin_unlock_irqrestore(&wilc->txq_spinlock, wilc->txq_spinlock_flags);
 
        while (dropped > 0) {
-               wilc_lock_timeout(wilc, &wilc->txq_event, 1);
+               wait_for_completion_timeout(&wilc->txq_event,
+                                               msecs_to_jiffies(1));
                dropped--;
        }
 
@@ -310,7 +310,7 @@ static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
        netdev_dbg(vif->ndev, "Adding config packet ...\n");
        if (wilc->quit) {
                netdev_dbg(vif->ndev, "Return due to clear function\n");
-               up(&wilc->cfg_event);
+               complete(&wilc->cfg_event);
                return 0;
        }
 
@@ -571,8 +571,7 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
                if (wilc->quit)
                        break;
 
-               wilc_lock_timeout(wilc, &wilc->txq_add_to_head_cs,
-                                       CFG_PKTS_TIMEOUT);
+               mutex_lock(&wilc->txq_add_to_head_cs);
                wilc_wlan_txq_filter_dup_tcp_ack(dev);
                tqe = wilc_wlan_txq_get_first(wilc);
                i = 0;
@@ -753,7 +752,7 @@ _end_:
                if (ret != 1)
                        break;
        } while (0);
-       up(&wilc->txq_add_to_head_cs);
+       mutex_unlock(&wilc->txq_add_to_head_cs);
 
        wilc->txq_exit = 1;
        *txq_count = wilc->txq_entries;
@@ -770,7 +769,7 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
 
        do {
                if (wilc->quit) {
-                       up(&wilc->cfg_event);
+                       complete(&wilc->cfg_event);
                        break;
                }
                rqe = wilc_wlan_rxq_remove(wilc);
@@ -821,7 +820,7 @@ static void wilc_wlan_handle_rxq(struct wilc *wilc)
                                        wilc_wlan_cfg_indicate_rx(wilc, &buffer[pkt_offset + offset], pkt_len, &rsp);
                                        if (rsp.type == WILC_CFG_RSP) {
                                                if (wilc->cfg_seq_no == rsp.seq_no)
-                                                       up(&wilc->cfg_event);
+                                                       complete(&wilc->cfg_event);
                                        } else if (rsp.type == WILC_CFG_RSP_STATUS) {
                                                wilc_mac_indicate(wilc, WILC_MAC_INDICATE_STATUS);
 
@@ -1229,11 +1228,12 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
                if (wilc_wlan_cfg_commit(vif, WILC_CFG_SET, drv_handler))
                        ret_size = 0;
 
-               if (wilc_lock_timeout(wilc, &wilc->cfg_event,
-                                           CFG_PKTS_TIMEOUT)) {
+               if (!wait_for_completion_timeout(&wilc->cfg_event,
+                                       msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
                        netdev_dbg(vif->ndev, "Set Timed Out\n");
                        ret_size = 0;
                }
+
                wilc->cfg_frame_in_use = 0;
                wilc->cfg_frame_offset = 0;
                wilc->cfg_seq_no += 1;
@@ -1266,8 +1266,8 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
                if (wilc_wlan_cfg_commit(vif, WILC_CFG_QUERY, drv_handler))
                        ret_size = 0;
 
-               if (wilc_lock_timeout(wilc, &wilc->cfg_event,
-                                           CFG_PKTS_TIMEOUT)) {
+               if (!wait_for_completion_timeout(&wilc->cfg_event,
+                                       msecs_to_jiffies(CFG_PKTS_TIMEOUT))) {
                        netdev_dbg(vif->ndev, "Get Timed Out\n");
                        ret_size = 0;
                }
index 4ebb31a35a64303bc31a5f429d8e19d924cbde28..b2bbaa1c60b07e449ddafac601cedca234d936c8 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include "intel_soc_dts_iosf.h"
 
 #define CRITICAL_OFFSET_FROM_TJ_MAX    5000
@@ -42,7 +43,8 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
 }
 
 static const struct x86_cpu_id soc_thermal_ids[] = {
-       { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
+       { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1, 0,
+               BYT_SOC_DTS_APIC_IRQ},
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
index 3840d6b421c402b4caa8974c88450b5958a2e902..5e4fa920686151f94042af7b9e193d3d0486481f 100644 (file)
@@ -93,8 +93,6 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
 #define        SERIAL_XMIT_SIZE        (min(PAGE_SIZE, 4096))
 #endif
 
-#define STD_COM_FLAGS (0)
-
 /* firmware stuff */
 #define ZL_MAX_BLOCKS  16
 #define DRIVER_VERSION 0x02010203
@@ -2288,7 +2286,6 @@ static int cy_get_serial_info(struct cyclades_port *info,
                .closing_wait = info->port.closing_wait,
                .baud_base = info->baud,
                .custom_divisor = info->custom_divisor,
-               .hub6 = 0,              /*!!! */
        };
        return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
 }
@@ -3084,7 +3081,6 @@ static int cy_init_card(struct cyclades_card *cinfo)
 
                info->port.closing_wait = CLOSING_WAIT_DELAY;
                info->port.close_delay = 5 * HZ / 10;
-               info->port.flags = STD_COM_FLAGS;
                init_completion(&info->shutdown_wait);
 
                if (cy_is_Z(cinfo)) {
index 345cebb07ae7933156784e46956c78983acf2e29..2685d59d27245ca5a4d7b00d15e7938cdbd4d837 100644 (file)
@@ -252,20 +252,11 @@ static int ipwireless_get_serial_info(struct ipw_tty *tty,
 {
        struct serial_struct tmp;
 
-       if (!retinfo)
-               return (-EFAULT);
-
        memset(&tmp, 0, sizeof(tmp));
        tmp.type = PORT_UNKNOWN;
        tmp.line = tty->index;
-       tmp.port = 0;
-       tmp.irq = 0;
-       tmp.flags = 0;
        tmp.baud_base = 115200;
-       tmp.close_delay = 0;
-       tmp.closing_wait = 0;
-       tmp.custom_divisor = 0;
-       tmp.hub6 = 0;
+
        if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
                return -EFAULT;
 
index 9325262289f9e191a1c3ecb5d8024df56b7e0d21..25ccef2fe748b5c753ca40da356cc6441e5e2a77 100644 (file)
@@ -323,12 +323,12 @@ static void dashtty_timer(unsigned long ignored)
        if (channel >= 0)
                fetch_data(channel);
 
-       mod_timer_pinned(&poll_timer, jiffies + DA_TTY_POLL);
+       mod_timer(&poll_timer, jiffies + DA_TTY_POLL);
 }
 
 static void add_poll_timer(struct timer_list *poll_timer)
 {
-       setup_timer(poll_timer, dashtty_timer, 0);
+       setup_pinned_timer(poll_timer, dashtty_timer, 0);
        poll_timer->expires = jiffies + DA_TTY_POLL;
 
        /*
index a119176a18551a19c59a0ee1750d89c67818ce05..234123b0c642adac44294201e9fccbe33476756a 100644 (file)
@@ -689,7 +689,7 @@ static void mips_ejtag_fdc_tty_timer(unsigned long opaque)
 
        mips_ejtag_fdc_handle(priv);
        if (!priv->removing)
-               mod_timer_pinned(&priv->poll_timer, jiffies + FDC_TTY_POLL);
+               mod_timer(&priv->poll_timer, jiffies + FDC_TTY_POLL);
 }
 
 /* TTY Port operations */
@@ -1002,7 +1002,7 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
                raw_spin_unlock_irq(&priv->lock);
        } else {
                /* If we didn't get an usable IRQ, poll instead */
-               setup_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
+               setup_pinned_timer(&priv->poll_timer, mips_ejtag_fdc_tty_timer,
                            (unsigned long)priv);
                priv->poll_timer.expires = jiffies + FDC_TTY_POLL;
                /*
index 98d2bd16706d5a483774cd405d22220b757538ed..69294ae154be0f17ae0e56c39d6fa048077b3c9c 100644 (file)
@@ -1219,7 +1219,6 @@ static int mxser_get_serial_info(struct tty_struct *tty,
                .close_delay = info->port.close_delay,
                .closing_wait = info->port.closing_wait,
                .custom_divisor = info->custom_divisor,
-               .hub6 = 0
        };
        if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
                return -EFAULT;
index 215a99237e952d8ceceb71e2c0eb266229746283..122e0e4029fee9e42f4b1ba37f0439728f0d54a4 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/serial_reg.h>
 #include <linux/dmaengine.h>
 
+#include "../serial_mctrl_gpio.h"
+
 struct uart_8250_dma {
        int (*tx_dma)(struct uart_8250_port *p);
        int (*rx_dma)(struct uart_8250_port *p);
@@ -53,11 +55,9 @@ struct old_serial_port {
        unsigned int port;
        unsigned int irq;
        upf_t        flags;
-       unsigned char hub6;
        unsigned char io_type;
        unsigned char __iomem *iomem_base;
        unsigned short iomem_reg_shift;
-       unsigned long irqflags;
 };
 
 struct serial8250_config {
@@ -131,6 +131,47 @@ void serial8250_rpm_put(struct uart_8250_port *p);
 int serial8250_em485_init(struct uart_8250_port *p);
 void serial8250_em485_destroy(struct uart_8250_port *p);
 
+static inline void serial8250_out_MCR(struct uart_8250_port *up, int value)
+{
+       int mctrl_gpio = 0;
+
+       serial_out(up, UART_MCR, value);
+
+       if (value & UART_MCR_RTS)
+               mctrl_gpio |= TIOCM_RTS;
+       if (value & UART_MCR_DTR)
+               mctrl_gpio |= TIOCM_DTR;
+
+       mctrl_gpio_set(up->gpios, mctrl_gpio);
+}
+
+static inline int serial8250_in_MCR(struct uart_8250_port *up)
+{
+       int mctrl, mctrl_gpio = 0;
+
+       mctrl = serial_in(up, UART_MCR);
+
+       /* save current MCR values */
+       if (mctrl & UART_MCR_RTS)
+               mctrl_gpio |= TIOCM_RTS;
+       if (mctrl & UART_MCR_DTR)
+               mctrl_gpio |= TIOCM_DTR;
+
+       mctrl_gpio = mctrl_gpio_get_outputs(up->gpios, &mctrl_gpio);
+
+       if (mctrl_gpio & TIOCM_RTS)
+               mctrl |= UART_MCR_RTS;
+       else
+               mctrl &= ~UART_MCR_RTS;
+
+       if (mctrl_gpio & TIOCM_DTR)
+               mctrl |= UART_MCR_DTR;
+       else
+               mctrl &= ~UART_MCR_DTR;
+
+       return mctrl;
+}
+
 #if defined(__alpha__) && !defined(CONFIG_PCI)
 /*
  * Digital did something really horribly wrong with the OUT1 and OUT2
@@ -237,9 +278,3 @@ static inline int serial_index(struct uart_port *port)
 {
        return port->minor - 64;
 }
-
-#if 0
-#define DEBUG_INTR(fmt...)     printk(fmt)
-#else
-#define DEBUG_INTR(fmt...)     do { } while (0)
-#endif
index 0fbd7c033a251e7d0cb214ed2bbd1ad1f19f0a76..13ad5c3d2e681893aeaa0d675d17c9f04f06b967 100644 (file)
@@ -114,7 +114,7 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
        struct list_head *l, *end = NULL;
        int pass_counter = 0, handled = 0;
 
-       DEBUG_INTR("serial8250_interrupt(%d)...", irq);
+       pr_debug("%s(%d): start\n", __func__, irq);
 
        spin_lock(&i->lock);
 
@@ -144,7 +144,7 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
 
        spin_unlock(&i->lock);
 
-       DEBUG_INTR("end.\n");
+       pr_debug("%s(%d): end\n", __func__, irq);
 
        return IRQ_RETVAL(handled);
 }
@@ -546,10 +546,10 @@ static void __init serial8250_isa_init_ports(void)
 
                port->iobase   = old_serial_port[i].port;
                port->irq      = irq_canonicalize(old_serial_port[i].irq);
-               port->irqflags = old_serial_port[i].irqflags;
+               port->irqflags = 0;
                port->uartclk  = old_serial_port[i].baud_base * 16;
                port->flags    = old_serial_port[i].flags;
-               port->hub6     = old_serial_port[i].hub6;
+               port->hub6     = 0;
                port->membase  = old_serial_port[i].iomem_base;
                port->iotype   = old_serial_port[i].io_type;
                port->regshift = old_serial_port[i].iomem_reg_shift;
@@ -675,7 +675,7 @@ static struct console univ8250_console = {
        .device         = uart_console_device,
        .setup          = univ8250_console_setup,
        .match          = univ8250_console_match,
-       .flags          = CON_PRINTBUFFER | CON_ANYTIME,
+       .flags          = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV,
        .index          = -1,
        .data           = &serial8250_reg,
 };
@@ -974,6 +974,8 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
 
        uart = serial8250_find_match_or_unused(&up->port);
        if (uart && uart->port.type != PORT_8250_CIR) {
+               struct mctrl_gpios *gpios;
+
                if (uart->port.dev)
                        uart_remove_one_port(&serial8250_reg, &uart->port);
 
@@ -1011,6 +1013,13 @@ int serial8250_register_8250_port(struct uart_8250_port *up)
                if (up->port.flags & UPF_FIXED_TYPE)
                        uart->port.type = up->port.type;
 
+               gpios = mctrl_gpio_init(&uart->port, 0);
+               if (IS_ERR(gpios)) {
+                       if (PTR_ERR(gpios) != -ENOSYS)
+                               return PTR_ERR(gpios);
+               } else
+                       uart->gpios = gpios;
+
                serial8250_set_defaults(uart);
 
                /* Possibly override default I/O functions.  */
index 7f33d1c8d1a952bc7c5de2953a796d3a0c044a40..3590d012001f8cf5a10bc8a385ac163abe6b5672 100644 (file)
@@ -145,6 +145,7 @@ void serial8250_rx_dma_flush(struct uart_8250_port *p)
                dmaengine_terminate_all(dma->rxchan);
        }
 }
+EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
 
 int serial8250_request_dma(struct uart_8250_port *p)
 {
index 8d08ff5c4e34b31f359ebe403a5304f3e89a0fb4..85a12f03240247494366a231d24fa86ba042dfdb 100644 (file)
@@ -150,6 +150,7 @@ EARLYCON_DECLARE(uart, early_serial8250_setup);
 OF_EARLYCON_DECLARE(ns16550, "ns16550", early_serial8250_setup);
 OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup);
 OF_EARLYCON_DECLARE(uart, "nvidia,tegra20-uart", early_serial8250_setup);
+OF_EARLYCON_DECLARE(uart, "snps,dw-apb-uart", early_serial8250_setup);
 
 #ifdef CONFIG_SERIAL_8250_OMAP
 
index 870981dd9e39ed1af8ff9ecdddccb42eea349e55..737b4b3957b0bc2470cde5574eed1e5f7230c90a 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/pnp.h>
 #include <linux/kernel.h>
 #include <linux/serial_core.h>
+#include <linux/irq.h>
 #include  "8250.h"
 
 #define ADDR_PORT 0
 #define IO_ADDR2 0x60
 #define LDN 0x7
 
+#define IRQ_MODE       0x70
+#define IRQ_SHARE      BIT(4)
+#define IRQ_MODE_MASK  (BIT(6) | BIT(5))
+#define IRQ_LEVEL_LOW  0
+#define IRQ_EDGE_HIGH  BIT(5)
+
 #define RS485  0xF0
 #define RTS_INVERT BIT(5)
 #define RS485_URA BIT(4)
@@ -176,10 +183,37 @@ static int find_base_port(struct fintek_8250 *pdata, u16 io_address)
        return -ENODEV;
 }
 
+static int fintek_8250_set_irq_mode(struct fintek_8250 *pdata, bool level_mode)
+{
+       int status;
+       u8 tmp;
+
+       status = fintek_8250_enter_key(pdata->base_port, pdata->key);
+       if (status)
+               return status;
+
+       outb(LDN, pdata->base_port + ADDR_PORT);
+       outb(pdata->index, pdata->base_port + DATA_PORT);
+
+       outb(IRQ_MODE, pdata->base_port + ADDR_PORT);
+       tmp = inb(pdata->base_port + DATA_PORT);
+
+       tmp &= ~IRQ_MODE_MASK;
+       tmp |= IRQ_SHARE;
+       if (!level_mode)
+               tmp |= IRQ_EDGE_HIGH;
+
+       outb(tmp, pdata->base_port + DATA_PORT);
+       fintek_8250_exit_key(pdata->base_port);
+       return 0;
+}
+
 int fintek_8250_probe(struct uart_8250_port *uart)
 {
        struct fintek_8250 *pdata;
        struct fintek_8250 probe_data;
+       struct irq_data *irq_data = irq_get_irq_data(uart->port.irq);
+       bool level_mode = irqd_is_level_type(irq_data);
 
        if (find_base_port(&probe_data, uart->port.iobase))
                return -ENODEV;
@@ -192,5 +226,5 @@ int fintek_8250_probe(struct uart_8250_port *uart)
        uart->port.rs485_config = fintek_8250_rs485_config;
        uart->port.private_data = pdata;
 
-       return 0;
+       return fintek_8250_set_irq_mode(pdata, level_mode);
 }
index b0677f610863d3a6a61423623e7627bae35638d2..4d9dc10e265c9c0dd5d850bea791e25c4475ef55 100644 (file)
@@ -48,7 +48,6 @@ static const struct of_device_id of_match[];
 #define UART_MCR_MDCE  BIT(7)
 #define UART_MCR_FCM   BIT(6)
 
-#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
 static struct earlycon_device *early_device;
 
 static uint8_t __init early_in(struct uart_port *port, int offset)
@@ -141,7 +140,6 @@ OF_EARLYCON_DECLARE(jz4775_uart, "ingenic,jz4775-uart",
 EARLYCON_DECLARE(jz4780_uart, ingenic_early_console_setup);
 OF_EARLYCON_DECLARE(jz4780_uart, "ingenic,jz4780-uart",
                    ingenic_early_console_setup);
-#endif /* CONFIG_SERIAL_EARLYCON */
 
 static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
 {
index 86379a79a6a33c6f728892a353454356bbbefb68..339de9cd086612c60d17ef3c581b59df34a0022a 100644 (file)
@@ -96,13 +96,27 @@ static int tng_setup(struct mid8250 *mid, struct uart_port *p)
 static int dnv_handle_irq(struct uart_port *p)
 {
        struct mid8250 *mid = p->private_data;
+       struct uart_8250_port *up = up_to_u8250p(p);
        unsigned int fisr = serial_port_in(p, INTEL_MID_UART_DNV_FISR);
+       u32 status;
        int ret = IRQ_NONE;
-
-       if (fisr & BIT(2))
-               ret |= hsu_dma_irq(&mid->dma_chip, 1);
-       if (fisr & BIT(1))
-               ret |= hsu_dma_irq(&mid->dma_chip, 0);
+       int err;
+
+       if (fisr & BIT(2)) {
+               err = hsu_dma_get_status(&mid->dma_chip, 1, &status);
+               if (err > 0) {
+                       serial8250_rx_dma_flush(up);
+                       ret |= IRQ_HANDLED;
+               } else if (err == 0)
+                       ret |= hsu_dma_do_irq(&mid->dma_chip, 1, status);
+       }
+       if (fisr & BIT(1)) {
+               err = hsu_dma_get_status(&mid->dma_chip, 0, &status);
+               if (err > 0)
+                       ret |= IRQ_HANDLED;
+               else if (err == 0)
+                       ret |= hsu_dma_do_irq(&mid->dma_chip, 0, status);
+       }
        if (fisr & BIT(0))
                ret |= serial8250_handle_irq(p, serial_port_in(p, UART_IIR));
        return ret;
index 3489fbcb7313a1e1cc59e6591beccf313f865eb0..3611ec9bb4fa27407cc50f157321f8e4ed1708fd 100644 (file)
@@ -301,7 +301,7 @@ static struct platform_driver mtk8250_platform_driver = {
 };
 module_platform_driver(mtk8250_platform_driver);
 
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && !defined(MODULE)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
 static int __init early_mtk8250_setup(struct earlycon_device *device,
                                        const char *options)
 {
index 2c44c792d5865addd380a21dfc70f5f62401d8c5..e14982f36a04dff107b11f1f5e75b2b7a587390a 100644 (file)
@@ -134,18 +134,21 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
 
        serial8250_do_set_mctrl(port, mctrl);
 
-       /*
-        * Turn off autoRTS if RTS is lowered and restore autoRTS setting
-        * if RTS is raised
-        */
-       lcr = serial_in(up, UART_LCR);
-       serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
-       if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
-               priv->efr |= UART_EFR_RTS;
-       else
-               priv->efr &= ~UART_EFR_RTS;
-       serial_out(up, UART_EFR, priv->efr);
-       serial_out(up, UART_LCR, lcr);
+       if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
+                                               UART_GPIO_RTS))) {
+               /*
+                * Turn off autoRTS if RTS is lowered and restore autoRTS
+                * setting if RTS is raised
+                */
+               lcr = serial_in(up, UART_LCR);
+               serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+               if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
+                       priv->efr |= UART_EFR_RTS;
+               else
+                       priv->efr &= ~UART_EFR_RTS;
+               serial_out(up, UART_EFR, priv->efr);
+               serial_out(up, UART_LCR, lcr);
+       }
 }
 
 /*
@@ -280,7 +283,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
        serial_out(up, UART_EFR, UART_EFR_ECB);
 
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
-       serial_out(up, UART_MCR, UART_MCR_TCRTLR);
+       serial8250_out_MCR(up, UART_MCR_TCRTLR);
        serial_out(up, UART_FCR, up->fcr);
 
        omap8250_update_scr(up, priv);
@@ -296,7 +299,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up)
        serial_out(up, UART_LCR, 0);
 
        /* drop TCR + TLR access, we setup XON/XOFF later */
-       serial_out(up, UART_MCR, up->mcr);
+       serial8250_out_MCR(up, up->mcr);
        serial_out(up, UART_IER, up->ier);
 
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
@@ -446,7 +449,9 @@ static void omap_8250_set_termios(struct uart_port *port,
        priv->efr = 0;
        up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
 
-       if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW) {
+       if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW
+               && IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(up->gpios,
+                                                       UART_GPIO_RTS))) {
                /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
                up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
                priv->efr |= UART_EFR_CTS;
index 8dd250fbd36726dc6fc370a75f7f412eee995766..20ebaea5c414d6d70f371aa9d367cd40327fa909 100644 (file)
@@ -1136,11 +1136,11 @@ static int pci_quatech_rqopr(struct uart_8250_port *port)
 static void pci_quatech_wqopr(struct uart_8250_port *port, u8 qopr)
 {
        unsigned long base = port->port.iobase;
-       u8 LCR, val;
+       u8 LCR;
 
        LCR = inb(base + UART_LCR);
        outb(0xBF, base + UART_LCR);
-       val = inb(base + UART_SCR);
+       inb(base + UART_SCR);
        outb(qopr, base + UART_SCR);
        outb(LCR, base + UART_LCR);
 }
@@ -1864,6 +1864,16 @@ pci_wch_ch353_setup(struct serial_private *priv,
        return pci_default_setup(priv, board, port, idx);
 }
 
+static int
+pci_wch_ch355_setup(struct serial_private *priv,
+               const struct pciserial_board *board,
+               struct uart_8250_port *port, int idx)
+{
+       port->port.flags |= UPF_FIXED_TYPE;
+       port->port.type = PORT_16550A;
+       return pci_default_setup(priv, board, port, idx);
+}
+
 static int
 pci_wch_ch38x_setup(struct serial_private *priv,
                    const struct pciserial_board *board,
@@ -1915,6 +1925,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
 #define PCI_DEVICE_ID_WCH_CH353_2S1PF  0x5046
 #define PCI_DEVICE_ID_WCH_CH353_1S1P   0x5053
 #define PCI_DEVICE_ID_WCH_CH353_2S1P   0x7053
+#define PCI_DEVICE_ID_WCH_CH355_4S     0x7173
 #define PCI_VENDOR_ID_AGESTAR          0x5372
 #define PCI_DEVICE_ID_AGESTAR_9375     0x6872
 #define PCI_VENDOR_ID_ASIX             0x9710
@@ -2618,6 +2629,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = pci_wch_ch353_setup,
        },
+       /* WCH CH355 4S card (16550 clone) */
+       {
+               .vendor         = PCI_VENDOR_ID_WCH,
+               .device         = PCI_DEVICE_ID_WCH_CH355_4S,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_wch_ch355_setup,
+       },
        /* WCH CH382 2S card (16850 clone) */
        {
                .vendor         = PCIE_VENDOR_ID_WCH,
@@ -3812,6 +3831,7 @@ static const struct pci_device_id blacklist[] = {
        /* multi-io cards handled by parport_serial */
        { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
        { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
+       { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
        { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
        { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
 
@@ -5567,6 +5587,10 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_b0_bt_2_115200 },
 
+       {       PCI_VENDOR_ID_WCH, PCI_DEVICE_ID_WCH_CH355_4S,
+               PCI_ANY_ID, PCI_ANY_ID,
+               0, 0, pbn_b0_bt_4_115200 },
+
        {       PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_wch382_2 },
index d4036038a4ddbe7bfb50406b2234381698bbe3ca..7481b95c6d846770b10f77a64e8231c7b2ad4e8e 100644 (file)
@@ -527,13 +527,13 @@ static void serial8250_clear_fifos(struct uart_8250_port *p)
 
 static inline void serial8250_em485_rts_after_send(struct uart_8250_port *p)
 {
-       unsigned char mcr = serial_in(p, UART_MCR);
+       unsigned char mcr = serial8250_in_MCR(p);
 
        if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
                mcr |= UART_MCR_RTS;
        else
                mcr &= ~UART_MCR_RTS;
-       serial_out(p, UART_MCR, mcr);
+       serial8250_out_MCR(p, mcr);
 }
 
 static void serial8250_em485_handle_start_tx(unsigned long arg);
@@ -785,10 +785,10 @@ static int size_fifo(struct uart_8250_port *up)
        old_lcr = serial_in(up, UART_LCR);
        serial_out(up, UART_LCR, 0);
        old_fcr = serial_in(up, UART_FCR);
-       old_mcr = serial_in(up, UART_MCR);
+       old_mcr = serial8250_in_MCR(up);
        serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
                    UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
-       serial_out(up, UART_MCR, UART_MCR_LOOP);
+       serial8250_out_MCR(up, UART_MCR_LOOP);
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
        old_dl = serial_dl_read(up);
        serial_dl_write(up, 0x0001);
@@ -800,7 +800,7 @@ static int size_fifo(struct uart_8250_port *up)
             (count < 256); count++)
                serial_in(up, UART_RX);
        serial_out(up, UART_FCR, old_fcr);
-       serial_out(up, UART_MCR, old_mcr);
+       serial8250_out_MCR(up, old_mcr);
        serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
        serial_dl_write(up, old_dl);
        serial_out(up, UART_LCR, old_lcr);
@@ -1040,17 +1040,17 @@ static void autoconfig_16550a(struct uart_8250_port *up)
         * it's changed. If so, set baud_base in EXCR2 to 921600. -- dwmw2
         */
        serial_out(up, UART_LCR, 0);
-       status1 = serial_in(up, UART_MCR);
+       status1 = serial8250_in_MCR(up);
        serial_out(up, UART_LCR, 0xE0);
        status2 = serial_in(up, 0x02); /* EXCR1 */
 
        if (!((status2 ^ status1) & UART_MCR_LOOP)) {
                serial_out(up, UART_LCR, 0);
-               serial_out(up, UART_MCR, status1 ^ UART_MCR_LOOP);
+               serial8250_out_MCR(up, status1 ^ UART_MCR_LOOP);
                serial_out(up, UART_LCR, 0xE0);
                status2 = serial_in(up, 0x02); /* EXCR1 */
                serial_out(up, UART_LCR, 0);
-               serial_out(up, UART_MCR, status1);
+               serial8250_out_MCR(up, status1);
 
                if ((status2 ^ status1) & UART_MCR_LOOP) {
                        unsigned short quot;
@@ -1224,7 +1224,7 @@ static void autoconfig(struct uart_8250_port *up)
                }
        }
 
-       save_mcr = serial_in(up, UART_MCR);
+       save_mcr = serial8250_in_MCR(up);
        save_lcr = serial_in(up, UART_LCR);
 
        /*
@@ -1237,9 +1237,9 @@ static void autoconfig(struct uart_8250_port *up)
         * that conflicts with COM 1-4 --- we hope!
         */
        if (!(port->flags & UPF_SKIP_TEST)) {
-               serial_out(up, UART_MCR, UART_MCR_LOOP | 0x0A);
+               serial8250_out_MCR(up, UART_MCR_LOOP | 0x0A);
                status1 = serial_in(up, UART_MSR) & 0xF0;
-               serial_out(up, UART_MCR, save_mcr);
+               serial8250_out_MCR(up, save_mcr);
                if (status1 != 0x90) {
                        spin_unlock_irqrestore(&port->lock, flags);
                        DEBUG_AUTOCONF("LOOP test failed (%02x) ",
@@ -1305,7 +1305,7 @@ static void autoconfig(struct uart_8250_port *up)
        if (port->type == PORT_RSA)
                serial_out(up, UART_RSA_FRR, 0);
 #endif
-       serial_out(up, UART_MCR, save_mcr);
+       serial8250_out_MCR(up, save_mcr);
        serial8250_clear_fifos(up);
        serial_in(up, UART_RX);
        if (up->capabilities & UART_CAP_UUE)
@@ -1353,19 +1353,18 @@ static void autoconfig_irq(struct uart_8250_port *up)
 
        /* forget possible initially masked and pending IRQ */
        probe_irq_off(probe_irq_on());
-       save_mcr = serial_in(up, UART_MCR);
+       save_mcr = serial8250_in_MCR(up);
        save_ier = serial_in(up, UART_IER);
-       serial_out(up, UART_MCR, UART_MCR_OUT1 | UART_MCR_OUT2);
+       serial8250_out_MCR(up, UART_MCR_OUT1 | UART_MCR_OUT2);
 
        irqs = probe_irq_on();
-       serial_out(up, UART_MCR, 0);
+       serial8250_out_MCR(up, 0);
        udelay(10);
        if (port->flags & UPF_FOURPORT) {
-               serial_out(up, UART_MCR,
-                           UART_MCR_DTR | UART_MCR_RTS);
+               serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
        } else {
-               serial_out(up, UART_MCR,
-                           UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
+               serial8250_out_MCR(up,
+                       UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
        }
        serial_out(up, UART_IER, 0x0f); /* enable all intrs */
        serial_in(up, UART_LSR);
@@ -1376,7 +1375,7 @@ static void autoconfig_irq(struct uart_8250_port *up)
        udelay(20);
        irq = probe_irq_off(irqs);
 
-       serial_out(up, UART_MCR, save_mcr);
+       serial8250_out_MCR(up, save_mcr);
        serial_out(up, UART_IER, save_ier);
 
        if (port->flags & UPF_FOURPORT)
@@ -1549,14 +1548,14 @@ static inline void start_tx_rs485(struct uart_port *port)
        del_timer(&em485->stop_tx_timer);
        em485->active_timer = NULL;
 
-       mcr = serial_in(up, UART_MCR);
+       mcr = serial8250_in_MCR(up);
        if (!!(up->port.rs485.flags & SER_RS485_RTS_ON_SEND) !=
            !!(mcr & UART_MCR_RTS)) {
                if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
                        mcr |= UART_MCR_RTS;
                else
                        mcr &= ~UART_MCR_RTS;
-               serial_out(up, UART_MCR, mcr);
+               serial8250_out_MCR(up, mcr);
 
                if (up->port.rs485.delay_rts_before_send > 0) {
                        em485->active_timer = &em485->start_tx_timer;
@@ -1619,6 +1618,8 @@ static void serial8250_disable_ms(struct uart_port *port)
        if (up->bugs & UART_BUG_NOMSR)
                return;
 
+       mctrl_gpio_disable_ms(up->gpios);
+
        up->ier &= ~UART_IER_MSI;
        serial_port_out(port, UART_IER, up->ier);
 }
@@ -1631,6 +1632,8 @@ static void serial8250_enable_ms(struct uart_port *port)
        if (up->bugs & UART_BUG_NOMSR)
                return;
 
+       mctrl_gpio_enable_ms(up->gpios);
+
        up->ier |= UART_IER_MSI;
 
        serial8250_rpm_get(up);
@@ -1686,7 +1689,7 @@ static void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
                lsr &= port->read_status_mask;
 
                if (lsr & UART_LSR_BI) {
-                       DEBUG_INTR("handling break....");
+                       pr_debug("%s: handling break\n", __func__);
                        flag = TTY_BREAK;
                } else if (lsr & UART_LSR_PE)
                        flag = TTY_PARITY;
@@ -1757,7 +1760,7 @@ void serial8250_tx_chars(struct uart_8250_port *up)
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
 
-       DEBUG_INTR("THRE...");
+       pr_debug("%s: THRE\n", __func__);
 
        /*
         * With RPM enabled, we have to wait until the FIFO is empty before the
@@ -1823,7 +1826,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
 
        status = serial_port_in(port, UART_LSR);
 
-       DEBUG_INTR("status = %x...", status);
+       pr_debug("%s: status = %x\n", __func__, status);
 
        if (status & (UART_LSR_DR | UART_LSR_BI)) {
                if (!up->dma || handle_rx_dma(up, iir))
@@ -1861,7 +1864,6 @@ static int serial8250_default_handle_irq(struct uart_port *port)
  */
 static int exar_handle_irq(struct uart_port *port)
 {
-       unsigned char int0, int1, int2, int3;
        unsigned int iir = serial_port_in(port, UART_IIR);
        int ret;
 
@@ -1869,10 +1871,10 @@ static int exar_handle_irq(struct uart_port *port)
 
        if ((port->type == PORT_XR17V35X) ||
           (port->type == PORT_XR17D15X)) {
-               int0 = serial_port_in(port, 0x80);
-               int1 = serial_port_in(port, 0x81);
-               int2 = serial_port_in(port, 0x82);
-               int3 = serial_port_in(port, 0x83);
+               serial_port_in(port, 0x80);
+               serial_port_in(port, 0x81);
+               serial_port_in(port, 0x82);
+               serial_port_in(port, 0x83);
        }
 
        return ret;
@@ -1915,7 +1917,8 @@ unsigned int serial8250_do_get_mctrl(struct uart_port *port)
                ret |= TIOCM_DSR;
        if (status & UART_MSR_CTS)
                ret |= TIOCM_CTS;
-       return ret;
+
+       return mctrl_gpio_get(up->gpios, &ret);
 }
 EXPORT_SYMBOL_GPL(serial8250_do_get_mctrl);
 
@@ -1944,7 +1947,7 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
 
        mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
 
-       serial_port_out(port, UART_MCR, mcr);
+       serial8250_out_MCR(up, mcr);
 }
 EXPORT_SYMBOL_GPL(serial8250_do_set_mctrl);
 
@@ -1994,8 +1997,6 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
 
        /* Wait up to 1s for flow control if necessary */
        if (up->port.flags & UPF_CONS_FLOW) {
-               unsigned int tmout;
-
                for (tmout = 1000000; tmout; tmout--) {
                        unsigned int msr = serial_in(up, UART_MSR);
                        up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
@@ -3093,7 +3094,7 @@ static void serial8250_console_restore(struct uart_8250_port *up)
 
        serial8250_set_divisor(port, baud, quot, frac);
        serial_port_out(port, UART_LCR, up->lcr);
-       serial_port_out(port, UART_MCR, UART_MCR_DTR | UART_MCR_RTS);
+       serial8250_out_MCR(up, UART_MCR_DTR | UART_MCR_RTS);
 }
 
 /*
index efd1f9c047b1c0ce0e75d200b07bab8f9a4424a0..b8d9c8c9d02a9762a2be77861554f13e49488a0c 100644 (file)
@@ -35,7 +35,7 @@ struct uniphier8250_priv {
        spinlock_t atomic_write_lock;
 };
 
-#if defined(CONFIG_SERIAL_8250_CONSOLE) && !defined(MODULE)
+#ifdef CONFIG_SERIAL_8250_CONSOLE
 static int __init uniphier_early_console_setup(struct earlycon_device *device,
                                               const char *options)
 {
index e46761d20f7b4c780784d48c9f8de23f47f5134c..c9ec839a5ddf447a6b7b3fe3158ad4104e3024c2 100644 (file)
@@ -6,6 +6,7 @@
 config SERIAL_8250
        tristate "8250/16550 and compatible serial support"
        select SERIAL_CORE
+       select SERIAL_MCTRL_GPIO if GPIOLIB
        ---help---
          This selects whether you want to include the driver for the standard
          serial ports.  The standard answer is Y.  People who might say N
@@ -387,7 +388,8 @@ config SERIAL_8250_MT6577
 
 config SERIAL_8250_UNIPHIER
        tristate "Support for UniPhier on-chip UART"
-       depends on SERIAL_8250 && ARCH_UNIPHIER
+       depends on SERIAL_8250
+       depends on ARCH_UNIPHIER || COMPILE_TEST
        help
          If you have a UniPhier based board and want to use the on-chip
          serial ports, say Y to this option. If unsure, say N.
@@ -395,7 +397,7 @@ config SERIAL_8250_UNIPHIER
 config SERIAL_8250_INGENIC
        tristate "Support for Ingenic SoC serial ports"
        depends on SERIAL_8250
-       depends on (OF_FLATTREE && SERIAL_8250_CONSOLE) || !SERIAL_EARLYCON
+       depends on OF_FLATTREE
        depends on MIPS || COMPILE_TEST
        help
          If you have a system using an Ingenic SoC and wish to make use of
index 7e3a58c8bb67cd80020408a24f216332fc709d9f..518db24a5b36df38e03161d251d68f949453ee05 100644 (file)
@@ -736,6 +736,7 @@ config SERIAL_SH_SCI
        tristate "SuperH SCI(F) serial port support"
        depends on SUPERH || ARCH_RENESAS || H8300 || COMPILE_TEST
        select SERIAL_CORE
+       select SERIAL_MCTRL_GPIO if GPIOLIB
 
 config SERIAL_SH_SCI_NR_UARTS
        int "Maximum number of SCI(F) serial ports"
@@ -1477,7 +1478,7 @@ config SERIAL_MPS2_UART_CONSOLE
 
 config SERIAL_MPS2_UART
        bool "MPS2 UART port"
-       depends on ARM || COMPILE_TEST
+       depends on ARCH_MPS2 || COMPILE_TEST
        select SERIAL_CORE
        help
          This driver support the UART ports on ARM MPS2.
index 1b7331e40d795e1ec5482d6a82871bd5ce050b60..8a9e213387a79fcc335caad27520a68edf03f446 100644 (file)
@@ -2553,11 +2553,17 @@ static int sbsa_uart_probe(struct platform_device *pdev)
        if (!uap)
                return -ENOMEM;
 
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "cannot obtain irq\n");
+               return ret;
+       }
+       uap->port.irq   = ret;
+
        uap->reg_offset = vendor_sbsa.reg_offset;
        uap->vendor     = &vendor_sbsa;
        uap->fifosize   = 32;
        uap->port.iotype = vendor_sbsa.access_32b ? UPIO_MEM32 : UPIO_MEM;
-       uap->port.irq   = platform_get_irq(pdev, 0);
        uap->port.ops   = &sbsa_uart_pops;
        uap->fixed_baud = baudrate;
 
index 954941dd812478e4eb9120b40412dd8ba9fadaef..2eaa18ddef6171640729bf8a770363331f5edaa3 100644 (file)
@@ -108,6 +108,12 @@ struct atmel_uart_char {
        u16             ch;
 };
 
+/*
+ * Be careful, the real size of the ring buffer is
+ * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer
+ * can contain up to 1024 characters in PIO mode and up to 4096 characters in
+ * DMA mode.
+ */
 #define ATMEL_SERIAL_RINGSIZE 1024
 
 /*
@@ -145,10 +151,10 @@ struct atmel_uart_port {
        dma_cookie_t                    cookie_rx;
        struct scatterlist              sg_tx;
        struct scatterlist              sg_rx;
-       struct tasklet_struct   tasklet;
-       unsigned int            irq_status;
+       struct tasklet_struct   tasklet_rx;
+       struct tasklet_struct   tasklet_tx;
+       atomic_t                tasklet_shutdown;
        unsigned int            irq_status_prev;
-       unsigned int            status_change;
        unsigned int            tx_len;
 
        struct circ_buf         rx_ring;
@@ -281,6 +287,13 @@ static bool atmel_use_fifo(struct uart_port *port)
        return atmel_port->fifo_size;
 }
 
+static void atmel_tasklet_schedule(struct atmel_uart_port *atmel_port,
+                                  struct tasklet_struct *t)
+{
+       if (!atomic_read(&atmel_port->tasklet_shutdown))
+               tasklet_schedule(t);
+}
+
 static unsigned int atmel_get_lines_status(struct uart_port *port)
 {
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -482,19 +495,21 @@ static void atmel_start_tx(struct uart_port *port)
 {
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
-       if (atmel_use_pdc_tx(port)) {
-               if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN)
-                       /* The transmitter is already running.  Yes, we
-                          really need this.*/
-                       return;
+       if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR)
+                                      & ATMEL_PDC_TXTEN))
+               /* The transmitter is already running.  Yes, we
+                  really need this.*/
+               return;
 
+       if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
                if ((port->rs485.flags & SER_RS485_ENABLED) &&
                    !(port->rs485.flags & SER_RS485_RX_DURING_TX))
                        atmel_stop_rx(port);
 
+       if (atmel_use_pdc_tx(port))
                /* re-enable PDC transmit */
                atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
-       }
+
        /* Enable interrupts */
        atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
 }
@@ -710,7 +725,7 @@ static void atmel_rx_chars(struct uart_port *port)
                status = atmel_uart_readl(port, ATMEL_US_CSR);
        }
 
-       tasklet_schedule(&atmel_port->tasklet);
+       atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
 }
 
 /*
@@ -781,7 +796,7 @@ static void atmel_complete_tx_dma(void *arg)
         * remaining data from the beginning of xmit->buf to xmit->head.
         */
        if (!uart_circ_empty(xmit))
-               tasklet_schedule(&atmel_port->tasklet);
+               atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
 
        spin_unlock_irqrestore(&port->lock, flags);
 }
@@ -966,7 +981,7 @@ static void atmel_complete_rx_dma(void *arg)
        struct uart_port *port = arg;
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
-       tasklet_schedule(&atmel_port->tasklet);
+       atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
 }
 
 static void atmel_release_rx_dma(struct uart_port *port)
@@ -1006,7 +1021,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
        if (dmastat == DMA_ERROR) {
                dev_dbg(port->dev, "Get residue error, restart tasklet\n");
                atmel_uart_writel(port, ATMEL_US_IER, ATMEL_US_TIMEOUT);
-               tasklet_schedule(&atmel_port->tasklet);
+               atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_rx);
                return;
        }
 
@@ -1160,8 +1175,11 @@ static void atmel_uart_timer_callback(unsigned long data)
        struct uart_port *port = (void *)data;
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
-       tasklet_schedule(&atmel_port->tasklet);
-       mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port));
+       if (!atomic_read(&atmel_port->tasklet_shutdown)) {
+               tasklet_schedule(&atmel_port->tasklet_rx);
+               mod_timer(&atmel_port->uart_timer,
+                         jiffies + uart_poll_timeout(port));
+       }
 }
 
 /*
@@ -1183,7 +1201,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
                if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) {
                        atmel_uart_writel(port, ATMEL_US_IDR,
                                          (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT));
-                       tasklet_schedule(&atmel_port->tasklet);
+                       atmel_tasklet_schedule(atmel_port,
+                                              &atmel_port->tasklet_rx);
                }
 
                if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE |
@@ -1195,7 +1214,8 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
                if (pending & ATMEL_US_TIMEOUT) {
                        atmel_uart_writel(port, ATMEL_US_IDR,
                                          ATMEL_US_TIMEOUT);
-                       tasklet_schedule(&atmel_port->tasklet);
+                       atmel_tasklet_schedule(atmel_port,
+                                              &atmel_port->tasklet_rx);
                }
        }
 
@@ -1225,7 +1245,7 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
                /* Either PDC or interrupt transmission */
                atmel_uart_writel(port, ATMEL_US_IDR,
                                  atmel_port->tx_done_mask);
-               tasklet_schedule(&atmel_port->tasklet);
+               atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
        }
 }
 
@@ -1237,14 +1257,27 @@ atmel_handle_status(struct uart_port *port, unsigned int pending,
                    unsigned int status)
 {
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+       unsigned int status_change;
 
        if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC
                                | ATMEL_US_CTSIC)) {
-               atmel_port->irq_status = status;
-               atmel_port->status_change = atmel_port->irq_status ^
-                                           atmel_port->irq_status_prev;
+               status_change = status ^ atmel_port->irq_status_prev;
                atmel_port->irq_status_prev = status;
-               tasklet_schedule(&atmel_port->tasklet);
+
+               if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
+                                       | ATMEL_US_DCD | ATMEL_US_CTS)) {
+                       /* TODO: All reads to CSR will clear these interrupts! */
+                       if (status_change & ATMEL_US_RI)
+                               port->icount.rng++;
+                       if (status_change & ATMEL_US_DSR)
+                               port->icount.dsr++;
+                       if (status_change & ATMEL_US_DCD)
+                               uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
+                       if (status_change & ATMEL_US_CTS)
+                               uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
+
+                       wake_up_interruptible(&port->state->port.delta_msr_wait);
+               }
        }
 }
 
@@ -1571,37 +1604,25 @@ static int atmel_prepare_rx_pdc(struct uart_port *port)
 /*
  * tasklet handling tty stuff outside the interrupt handler.
  */
-static void atmel_tasklet_func(unsigned long data)
+static void atmel_tasklet_rx_func(unsigned long data)
 {
        struct uart_port *port = (struct uart_port *)data;
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
-       unsigned int status = atmel_port->irq_status;
-       unsigned int status_change = atmel_port->status_change;
 
        /* The interrupt handler does not take the lock */
        spin_lock(&port->lock);
-
-       atmel_port->schedule_tx(port);
-
-       if (status_change & (ATMEL_US_RI | ATMEL_US_DSR
-                               | ATMEL_US_DCD | ATMEL_US_CTS)) {
-               /* TODO: All reads to CSR will clear these interrupts! */
-               if (status_change & ATMEL_US_RI)
-                       port->icount.rng++;
-               if (status_change & ATMEL_US_DSR)
-                       port->icount.dsr++;
-               if (status_change & ATMEL_US_DCD)
-                       uart_handle_dcd_change(port, !(status & ATMEL_US_DCD));
-               if (status_change & ATMEL_US_CTS)
-                       uart_handle_cts_change(port, !(status & ATMEL_US_CTS));
-
-               wake_up_interruptible(&port->state->port.delta_msr_wait);
-
-               atmel_port->status_change = 0;
-       }
-
        atmel_port->schedule_rx(port);
+       spin_unlock(&port->lock);
+}
+
+static void atmel_tasklet_tx_func(unsigned long data)
+{
+       struct uart_port *port = (struct uart_port *)data;
+       struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
+       /* The interrupt handler does not take the lock */
+       spin_lock(&port->lock);
+       atmel_port->schedule_tx(port);
        spin_unlock(&port->lock);
 }
 
@@ -1785,7 +1806,11 @@ static int atmel_startup(struct uart_port *port)
                return retval;
        }
 
-       tasklet_enable(&atmel_port->tasklet);
+       atomic_set(&atmel_port->tasklet_shutdown, 0);
+       tasklet_init(&atmel_port->tasklet_rx, atmel_tasklet_rx_func,
+                       (unsigned long)port);
+       tasklet_init(&atmel_port->tasklet_tx, atmel_tasklet_tx_func,
+                       (unsigned long)port);
 
        /*
         * Initialize DMA (if necessary)
@@ -1833,7 +1858,6 @@ static int atmel_startup(struct uart_port *port)
 
        /* Save current CSR for comparison in atmel_tasklet_func() */
        atmel_port->irq_status_prev = atmel_get_lines_status(port);
-       atmel_port->irq_status = atmel_port->irq_status_prev;
 
        /*
         * Finally, enable the serial port
@@ -1905,29 +1929,36 @@ static void atmel_shutdown(struct uart_port *port)
 {
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
+       /* Disable interrupts at device level */
+       atmel_uart_writel(port, ATMEL_US_IDR, -1);
+
+       /* Prevent spurious interrupts from scheduling the tasklet */
+       atomic_inc(&atmel_port->tasklet_shutdown);
+
        /*
         * Prevent any tasklets being scheduled during
         * cleanup
         */
        del_timer_sync(&atmel_port->uart_timer);
 
+       /* Make sure that no interrupt is on the fly */
+       synchronize_irq(port->irq);
+
        /*
         * Clear out any scheduled tasklets before
         * we destroy the buffers
         */
-       tasklet_disable(&atmel_port->tasklet);
-       tasklet_kill(&atmel_port->tasklet);
+       tasklet_kill(&atmel_port->tasklet_rx);
+       tasklet_kill(&atmel_port->tasklet_tx);
 
        /*
         * Ensure everything is stopped and
-        * disable all interrupts, port and break condition.
+        * disable port and break condition.
         */
        atmel_stop_rx(port);
        atmel_stop_tx(port);
 
        atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_RSTSTA);
-       atmel_uart_writel(port, ATMEL_US_IDR, -1);
-
 
        /*
         * Shut-down the DMA.
@@ -2311,10 +2342,6 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
        port->irq       = pdev->resource[1].start;
        port->rs485_config      = atmel_config_rs485;
 
-       tasklet_init(&atmel_port->tasklet, atmel_tasklet_func,
-                       (unsigned long)port);
-       tasklet_disable(&atmel_port->tasklet);
-
        memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
 
        if (pdata && pdata->regs) {
@@ -2699,6 +2726,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
        atmel_port->uart.line = ret;
        atmel_serial_probe_fifos(atmel_port, pdev);
 
+       atomic_set(&atmel_port->tasklet_shutdown, 0);
        spin_lock_init(&atmel_port->lock_suspended);
 
        ret = atmel_init_port(atmel_port, pdev);
@@ -2795,7 +2823,8 @@ static int atmel_serial_remove(struct platform_device *pdev)
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
        int ret = 0;
 
-       tasklet_kill(&atmel_port->tasklet);
+       tasklet_kill(&atmel_port->tasklet_rx);
+       tasklet_kill(&atmel_port->tasklet_tx);
 
        device_init_wakeup(&pdev->dev, 0);
 
index c28e5c24da16041087fcc463e8bc19cb45ee23a0..5108fab953aadf665dfb8dc6d8efab9d69781556 100644 (file)
@@ -813,8 +813,12 @@ static int bcm_uart_probe(struct platform_device *pdev)
        struct clk *clk;
        int ret;
 
-       if (pdev->dev.of_node)
-               pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+       if (pdev->dev.of_node) {
+               pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
+
+               if (pdev->id < 0)
+                       pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
+       }
 
        if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
                return -EINVAL;
index 3d790033744efd060bf42dad1f494a5ee4da0155..7f95f782a48561e5ef466230c1a54dc8cf17455d 100644 (file)
@@ -1830,7 +1830,13 @@ static int lpuart_probe(struct platform_device *pdev)
        sport->port.dev = &pdev->dev;
        sport->port.type = PORT_LPUART;
        sport->port.iotype = UPIO_MEM;
-       sport->port.irq = platform_get_irq(pdev, 0);
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "cannot obtain irq\n");
+               return ret;
+       }
+       sport->port.irq = ret;
+
        if (sport->lpuart32)
                sport->port.ops = &lpuart32_pops;
        else
index 68765f7c2645be08ac3f7651214a119a35150d34..218b7118e85d1288a8d812f05191be5ec5aaa684 100644 (file)
@@ -30,7 +30,6 @@
 #define SUPPORT_SYSRQ
 #endif
 
-#include <linux/module.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/ioport.h>
@@ -51,9 +50,6 @@
 
 #define PASS_LIMIT     256
 
-/* Standard COM flags */
-#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
-
 static const struct {
        unsigned int port;
        unsigned int irq;
@@ -892,7 +888,7 @@ static void __init m32r_sio_init_ports(void)
                up->port.iobase   = old_serial_port[i].port;
                up->port.irq      = irq_canonicalize(old_serial_port[i].irq);
                up->port.uartclk  = BAUD_RATE * 16;
-               up->port.flags    = STD_COM_FLAGS;
+               up->port.flags    = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
                up->port.membase  = 0;
                up->port.iotype   = 0;
                up->port.regshift = 0;
@@ -1060,19 +1056,4 @@ static int __init m32r_sio_init(void)
 
        return ret;
 }
-
-static void __exit m32r_sio_exit(void)
-{
-       int i;
-
-       for (i = 0; i < UART_NR; i++)
-               uart_remove_one_port(&m32r_sio_reg, &m32r_sio_ports[i].port);
-
-       uart_unregister_driver(&m32r_sio_reg);
-}
-
-module_init(m32r_sio_init);
-module_exit(m32r_sio_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Generic M32R SIO serial driver");
+device_initcall(m32r_sio_init);
index 3f6e0ab725fe29e1bfe9355c824528507d38b2aa..9360801df3c4f674311100efe2b19fca15f79b75 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
  *
- *  Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
+ *  Copyright (C) 2012-2016 Alexander Shiyan <shc_work@mail.ru>
  *
  *  Based on max3100.c, by Christian Pellegrin <chripell@evolware.org>
  *  Based on max3110.c, by Feng Tang <feng.tang@intel.com>
@@ -32,6 +32,7 @@
 #define MAX310X_NAME                   "max310x"
 #define MAX310X_MAJOR                  204
 #define MAX310X_MINOR                  209
+#define MAX310X_UART_NRMAX             16
 
 /* MAX310X register definitions */
 #define MAX310X_RHR_REG                        (0x00) /* RX FIFO */
 #define MAX310X_LCR_FORCEPARITY_BIT    (1 << 5) /* 9-bit multidrop parity */
 #define MAX310X_LCR_TXBREAK_BIT                (1 << 6) /* TX break enable */
 #define MAX310X_LCR_RTS_BIT            (1 << 7) /* RTS pin control */
-#define MAX310X_LCR_WORD_LEN_5         (0x00)
-#define MAX310X_LCR_WORD_LEN_6         (0x01)
-#define MAX310X_LCR_WORD_LEN_7         (0x02)
-#define MAX310X_LCR_WORD_LEN_8         (0x03)
 
 /* IRDA register bits */
 #define MAX310X_IRDA_IRDAEN_BIT                (1 << 0) /* IRDA mode enable */
@@ -262,10 +259,10 @@ struct max310x_one {
        struct uart_port        port;
        struct work_struct      tx_work;
        struct work_struct      md_work;
+       struct work_struct      rs_work;
 };
 
 struct max310x_port {
-       struct uart_driver      uart;
        struct max310x_devtype  *devtype;
        struct regmap           *regmap;
        struct mutex            mutex;
@@ -276,6 +273,17 @@ struct max310x_port {
        struct max310x_one      p[0];
 };
 
+static struct uart_driver max310x_uart = {
+       .owner          = THIS_MODULE,
+       .driver_name    = MAX310X_NAME,
+       .dev_name       = "ttyMAX",
+       .major          = MAX310X_MAJOR,
+       .minor          = MAX310X_MINOR,
+       .nr             = MAX310X_UART_NRMAX,
+};
+
+static DECLARE_BITMAP(max310x_lines, MAX310X_UART_NRMAX);
+
 static u8 max310x_port_read(struct uart_port *port, u8 reg)
 {
        struct max310x_port *s = dev_get_drvdata(port->dev);
@@ -594,9 +602,7 @@ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
        unsigned int sts, ch, flag;
 
        if (unlikely(rxlen >= port->fifosize)) {
-               dev_warn_ratelimited(port->dev,
-                                    "Port %i: Possible RX FIFO overrun\n",
-                                    port->line);
+               dev_warn_ratelimited(port->dev, "Possible RX FIFO overrun\n");
                port->icount.buf_overrun++;
                /* Ensure sanity of RX level */
                rxlen = port->fifosize;
@@ -715,13 +721,13 @@ static irqreturn_t max310x_ist(int irq, void *dev_id)
 {
        struct max310x_port *s = (struct max310x_port *)dev_id;
 
-       if (s->uart.nr > 1) {
+       if (s->devtype->nr > 1) {
                do {
                        unsigned int val = ~0;
 
                        WARN_ON_ONCE(regmap_read(s->regmap,
                                                 MAX310X_GLOBALIRQ_REG, &val));
-                       val = ((1 << s->uart.nr) - 1) & ~val;
+                       val = ((1 << s->devtype->nr) - 1) & ~val;
                        if (!val)
                                break;
                        max310x_port_irq(s, fls(val) - 1);
@@ -796,7 +802,7 @@ static void max310x_set_termios(struct uart_port *port,
                                struct ktermios *termios,
                                struct ktermios *old)
 {
-       unsigned int lcr, flow = 0;
+       unsigned int lcr = 0, flow = 0;
        int baud;
 
        /* Mask termios capabilities we don't support */
@@ -805,17 +811,16 @@ static void max310x_set_termios(struct uart_port *port,
        /* Word size */
        switch (termios->c_cflag & CSIZE) {
        case CS5:
-               lcr = MAX310X_LCR_WORD_LEN_5;
                break;
        case CS6:
-               lcr = MAX310X_LCR_WORD_LEN_6;
+               lcr = MAX310X_LCR_LENGTH0_BIT;
                break;
        case CS7:
-               lcr = MAX310X_LCR_WORD_LEN_7;
+               lcr = MAX310X_LCR_LENGTH1_BIT;
                break;
        case CS8:
        default:
-               lcr = MAX310X_LCR_WORD_LEN_8;
+               lcr = MAX310X_LCR_LENGTH1_BIT | MAX310X_LCR_LENGTH0_BIT;
                break;
        }
 
@@ -877,36 +882,45 @@ static void max310x_set_termios(struct uart_port *port,
        uart_update_timeout(port, termios->c_cflag, baud);
 }
 
-static int max310x_rs485_config(struct uart_port *port,
-                               struct serial_rs485 *rs485)
+static void max310x_rs_proc(struct work_struct *ws)
 {
+       struct max310x_one *one = container_of(ws, struct max310x_one, rs_work);
        unsigned int val;
 
-       if (rs485->delay_rts_before_send > 0x0f ||
-                   rs485->delay_rts_after_send > 0x0f)
-               return -ERANGE;
+       val = (one->port.rs485.delay_rts_before_send << 4) |
+               one->port.rs485.delay_rts_after_send;
+       max310x_port_write(&one->port, MAX310X_HDPIXDELAY_REG, val);
 
-       val = (rs485->delay_rts_before_send << 4) |
-               rs485->delay_rts_after_send;
-       max310x_port_write(port, MAX310X_HDPIXDELAY_REG, val);
-       if (rs485->flags & SER_RS485_ENABLED) {
-               max310x_port_update(port, MAX310X_MODE1_REG,
+       if (one->port.rs485.flags & SER_RS485_ENABLED) {
+               max310x_port_update(&one->port, MAX310X_MODE1_REG,
                                MAX310X_MODE1_TRNSCVCTRL_BIT,
                                MAX310X_MODE1_TRNSCVCTRL_BIT);
-               max310x_port_update(port, MAX310X_MODE2_REG,
+               max310x_port_update(&one->port, MAX310X_MODE2_REG,
                                MAX310X_MODE2_ECHOSUPR_BIT,
                                MAX310X_MODE2_ECHOSUPR_BIT);
        } else {
-               max310x_port_update(port, MAX310X_MODE1_REG,
+               max310x_port_update(&one->port, MAX310X_MODE1_REG,
                                MAX310X_MODE1_TRNSCVCTRL_BIT, 0);
-               max310x_port_update(port, MAX310X_MODE2_REG,
+               max310x_port_update(&one->port, MAX310X_MODE2_REG,
                                MAX310X_MODE2_ECHOSUPR_BIT, 0);
        }
+}
+
+static int max310x_rs485_config(struct uart_port *port,
+                               struct serial_rs485 *rs485)
+{
+       struct max310x_one *one = container_of(port, struct max310x_one, port);
+
+       if ((rs485->delay_rts_before_send > 0x0f) ||
+           (rs485->delay_rts_after_send > 0x0f))
+               return -ERANGE;
 
        rs485->flags &= SER_RS485_RTS_ON_SEND | SER_RS485_ENABLED;
        memset(rs485->padding, 0, sizeof(rs485->padding));
        port->rs485 = *rs485;
 
+       schedule_work(&one->rs_work);
+
        return 0;
 }
 
@@ -1009,8 +1023,8 @@ static int __maybe_unused max310x_suspend(struct device *dev)
        struct max310x_port *s = dev_get_drvdata(dev);
        int i;
 
-       for (i = 0; i < s->uart.nr; i++) {
-               uart_suspend_port(&s->uart, &s->p[i].port);
+       for (i = 0; i < s->devtype->nr; i++) {
+               uart_suspend_port(&max310x_uart, &s->p[i].port);
                s->devtype->power(&s->p[i].port, 0);
        }
 
@@ -1022,9 +1036,9 @@ static int __maybe_unused max310x_resume(struct device *dev)
        struct max310x_port *s = dev_get_drvdata(dev);
        int i;
 
-       for (i = 0; i < s->uart.nr; i++) {
+       for (i = 0; i < s->devtype->nr; i++) {
                s->devtype->power(&s->p[i].port, 1);
-               uart_resume_port(&s->uart, &s->p[i].port);
+               uart_resume_port(&max310x_uart, &s->p[i].port);
        }
 
        return 0;
@@ -1159,18 +1173,6 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
        uartclk = max310x_set_ref_clk(s, freq, xtal);
        dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
 
-       /* Register UART driver */
-       s->uart.owner           = THIS_MODULE;
-       s->uart.dev_name        = "ttyMAX";
-       s->uart.major           = MAX310X_MAJOR;
-       s->uart.minor           = MAX310X_MINOR;
-       s->uart.nr              = devtype->nr;
-       ret = uart_register_driver(&s->uart);
-       if (ret) {
-               dev_err(dev, "Registering UART driver failed\n");
-               goto out_clk;
-       }
-
 #ifdef CONFIG_GPIOLIB
        /* Setup GPIO cotroller */
        s->gpio.owner           = THIS_MODULE;
@@ -1183,16 +1185,24 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
        s->gpio.base            = -1;
        s->gpio.ngpio           = devtype->nr * 4;
        s->gpio.can_sleep       = 1;
-       ret = gpiochip_add_data(&s->gpio, s);
+       ret = devm_gpiochip_add_data(dev, &s->gpio, s);
        if (ret)
-               goto out_uart;
+               goto out_clk;
 #endif
 
        mutex_init(&s->mutex);
 
        for (i = 0; i < devtype->nr; i++) {
+               unsigned int line;
+
+               line = find_first_zero_bit(max310x_lines, MAX310X_UART_NRMAX);
+               if (line == MAX310X_UART_NRMAX) {
+                       ret = -ERANGE;
+                       goto out_uart;
+               }
+
                /* Initialize port data */
-               s->p[i].port.line       = i;
+               s->p[i].port.line       = line;
                s->p[i].port.dev        = dev;
                s->p[i].port.irq        = irq;
                s->p[i].port.type       = PORT_MAX310X;
@@ -1214,10 +1224,19 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
                                    MAX310X_MODE1_IRQSEL_BIT);
                /* Initialize queue for start TX */
                INIT_WORK(&s->p[i].tx_work, max310x_wq_proc);
-               /* Initialize queue for changing mode */
+               /* Initialize queue for changing LOOPBACK mode */
                INIT_WORK(&s->p[i].md_work, max310x_md_proc);
+               /* Initialize queue for changing RS485 mode */
+               INIT_WORK(&s->p[i].rs_work, max310x_rs_proc);
+
                /* Register port */
-               uart_add_one_port(&s->uart, &s->p[i].port);
+               ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
+               if (ret) {
+                       s->p[i].port.dev = NULL;
+                       goto out_uart;
+               }
+               set_bit(line, max310x_lines);
+
                /* Go to suspend mode */
                devtype->power(&s->p[i].port, 0);
        }
@@ -1230,14 +1249,15 @@ static int max310x_probe(struct device *dev, struct max310x_devtype *devtype,
 
        dev_err(dev, "Unable to reguest IRQ %i\n", irq);
 
-       mutex_destroy(&s->mutex);
-
-#ifdef CONFIG_GPIOLIB
-       gpiochip_remove(&s->gpio);
-
 out_uart:
-#endif
-       uart_unregister_driver(&s->uart);
+       for (i = 0; i < devtype->nr; i++) {
+               if (s->p[i].port.dev) {
+                       uart_remove_one_port(&max310x_uart, &s->p[i].port);
+                       clear_bit(s->p[i].port.line, max310x_lines);
+               }
+       }
+
+       mutex_destroy(&s->mutex);
 
 out_clk:
        clk_disable_unprepare(s->clk);
@@ -1250,19 +1270,16 @@ static int max310x_remove(struct device *dev)
        struct max310x_port *s = dev_get_drvdata(dev);
        int i;
 
-#ifdef CONFIG_GPIOLIB
-       gpiochip_remove(&s->gpio);
-#endif
-
-       for (i = 0; i < s->uart.nr; i++) {
+       for (i = 0; i < s->devtype->nr; i++) {
                cancel_work_sync(&s->p[i].tx_work);
                cancel_work_sync(&s->p[i].md_work);
-               uart_remove_one_port(&s->uart, &s->p[i].port);
+               cancel_work_sync(&s->p[i].rs_work);
+               uart_remove_one_port(&max310x_uart, &s->p[i].port);
+               clear_bit(s->p[i].port.line, max310x_lines);
                s->devtype->power(&s->p[i].port, 0);
        }
 
        mutex_destroy(&s->mutex);
-       uart_unregister_driver(&s->uart);
        clk_disable_unprepare(s->clk);
 
        return 0;
@@ -1335,7 +1352,7 @@ static const struct spi_device_id max310x_id_table[] = {
 };
 MODULE_DEVICE_TABLE(spi, max310x_id_table);
 
-static struct spi_driver max310x_uart_driver = {
+static struct spi_driver max310x_spi_driver = {
        .driver = {
                .name           = MAX310X_NAME,
                .of_match_table = of_match_ptr(max310x_dt_ids),
@@ -1345,9 +1362,36 @@ static struct spi_driver max310x_uart_driver = {
        .remove         = max310x_spi_remove,
        .id_table       = max310x_id_table,
 };
-module_spi_driver(max310x_uart_driver);
 #endif
 
+static int __init max310x_uart_init(void)
+{
+       int ret;
+
+       bitmap_zero(max310x_lines, MAX310X_UART_NRMAX);
+
+       ret = uart_register_driver(&max310x_uart);
+       if (ret)
+               return ret;
+
+#ifdef CONFIG_SPI_MASTER
+       spi_register_driver(&max310x_spi_driver);
+#endif
+
+       return 0;
+}
+module_init(max310x_uart_init);
+
+static void __exit max310x_uart_exit(void)
+{
+#ifdef CONFIG_SPI_MASTER
+       spi_unregister_driver(&max310x_spi_driver);
+#endif
+
+       uart_unregister_driver(&max310x_uart);
+}
+module_exit(max310x_uart_exit);
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
 MODULE_DESCRIPTION("MAX310X serial driver");
index da9e27d3c2639dbe26d1cbd5566053d4bcd72d55..492ec4b375a0739d94026a1c2c53bca4db36b7c9 100644 (file)
@@ -1,4 +1,6 @@
 /*
+ * MPS2 UART driver
+ *
  * Copyright (C) 2015 ARM Limited
  *
  * Author: Vladimir Murzin <vladimir.murzin@arm.com>
@@ -17,7 +19,6 @@
 #include <linux/console.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -569,30 +570,20 @@ static int mps2_serial_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int mps2_serial_remove(struct platform_device *pdev)
-{
-       struct mps2_uart_port *mps_port = platform_get_drvdata(pdev);
-
-       uart_remove_one_port(&mps2_uart_driver, &mps_port->port);
-
-       return 0;
-}
-
 #ifdef CONFIG_OF
 static const struct of_device_id mps2_match[] = {
        { .compatible = "arm,mps2-uart", },
        {},
 };
-MODULE_DEVICE_TABLE(of, mps2_match);
 #endif
 
 static struct platform_driver mps2_serial_driver = {
        .probe = mps2_serial_probe,
-       .remove = mps2_serial_remove,
 
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = of_match_ptr(mps2_match),
+               .suppress_bind_attrs = true,
        },
 };
 
@@ -610,16 +601,4 @@ static int __init mps2_uart_init(void)
 
        return ret;
 }
-module_init(mps2_uart_init);
-
-static void __exit mps2_uart_exit(void)
-{
-       platform_driver_unregister(&mps2_serial_driver);
-       uart_unregister_driver(&mps2_uart_driver);
-}
-module_exit(mps2_uart_exit);
-
-MODULE_AUTHOR("Vladimir Murzin <vladimir.murzin@arm.com>");
-MODULE_DESCRIPTION("MPS2 UART driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
+arch_initcall(mps2_uart_init);
index b7d80bd57db9cd4eb8397f75f8a83ae40c0dd65c..7312e7e01b7e5901ab676ce99058e50407bc773c 100644 (file)
 # define SUPPORT_SYSRQ
 #endif
 
+#include <linux/kernel.h>
 #include <linux/atomic.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
-#include <linux/hrtimer.h>
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
-#include <linux/serial.h>
 #include <linux/slab.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
-
-#include "msm_serial.h"
-
-#define UARTDM_BURST_SIZE      16   /* in bytes */
-#define UARTDM_TX_AIGN(x)      ((x) & ~0x3) /* valid for > 1p3 */
-#define UARTDM_TX_MAX          256   /* in bytes, valid for <= 1p3 */
-#define UARTDM_RX_SIZE         (UART_XMIT_SIZE / 4)
+#include <linux/wait.h>
+
+#define UART_MR1                       0x0000
+
+#define UART_MR1_AUTO_RFR_LEVEL0       0x3F
+#define UART_MR1_AUTO_RFR_LEVEL1       0x3FF00
+#define UART_DM_MR1_AUTO_RFR_LEVEL1    0xFFFFFF00
+#define UART_MR1_RX_RDY_CTL            BIT(7)
+#define UART_MR1_CTS_CTL               BIT(6)
+
+#define UART_MR2                       0x0004
+#define UART_MR2_ERROR_MODE            BIT(6)
+#define UART_MR2_BITS_PER_CHAR         0x30
+#define UART_MR2_BITS_PER_CHAR_5       (0x0 << 4)
+#define UART_MR2_BITS_PER_CHAR_6       (0x1 << 4)
+#define UART_MR2_BITS_PER_CHAR_7       (0x2 << 4)
+#define UART_MR2_BITS_PER_CHAR_8       (0x3 << 4)
+#define UART_MR2_STOP_BIT_LEN_ONE      (0x1 << 2)
+#define UART_MR2_STOP_BIT_LEN_TWO      (0x3 << 2)
+#define UART_MR2_PARITY_MODE_NONE      0x0
+#define UART_MR2_PARITY_MODE_ODD       0x1
+#define UART_MR2_PARITY_MODE_EVEN      0x2
+#define UART_MR2_PARITY_MODE_SPACE     0x3
+#define UART_MR2_PARITY_MODE           0x3
+
+#define UART_CSR                       0x0008
+
+#define UART_TF                                0x000C
+#define UARTDM_TF                      0x0070
+
+#define UART_CR                                0x0010
+#define UART_CR_CMD_NULL               (0 << 4)
+#define UART_CR_CMD_RESET_RX           (1 << 4)
+#define UART_CR_CMD_RESET_TX           (2 << 4)
+#define UART_CR_CMD_RESET_ERR          (3 << 4)
+#define UART_CR_CMD_RESET_BREAK_INT    (4 << 4)
+#define UART_CR_CMD_START_BREAK                (5 << 4)
+#define UART_CR_CMD_STOP_BREAK         (6 << 4)
+#define UART_CR_CMD_RESET_CTS          (7 << 4)
+#define UART_CR_CMD_RESET_STALE_INT    (8 << 4)
+#define UART_CR_CMD_PACKET_MODE                (9 << 4)
+#define UART_CR_CMD_MODE_RESET         (12 << 4)
+#define UART_CR_CMD_SET_RFR            (13 << 4)
+#define UART_CR_CMD_RESET_RFR          (14 << 4)
+#define UART_CR_CMD_PROTECTION_EN      (16 << 4)
+#define UART_CR_CMD_STALE_EVENT_DISABLE        (6 << 8)
+#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define UART_CR_CMD_FORCE_STALE                (4 << 8)
+#define UART_CR_CMD_RESET_TX_READY     (3 << 8)
+#define UART_CR_TX_DISABLE             BIT(3)
+#define UART_CR_TX_ENABLE              BIT(2)
+#define UART_CR_RX_DISABLE             BIT(1)
+#define UART_CR_RX_ENABLE              BIT(0)
+#define UART_CR_CMD_RESET_RXBREAK_START        ((1 << 11) | (2 << 4))
+
+#define UART_IMR                       0x0014
+#define UART_IMR_TXLEV                 BIT(0)
+#define UART_IMR_RXSTALE               BIT(3)
+#define UART_IMR_RXLEV                 BIT(4)
+#define UART_IMR_DELTA_CTS             BIT(5)
+#define UART_IMR_CURRENT_CTS           BIT(6)
+#define UART_IMR_RXBREAK_START         BIT(10)
+
+#define UART_IPR_RXSTALE_LAST          0x20
+#define UART_IPR_STALE_LSB             0x1F
+#define UART_IPR_STALE_TIMEOUT_MSB     0x3FF80
+#define UART_DM_IPR_STALE_TIMEOUT_MSB  0xFFFFFF80
+
+#define UART_IPR                       0x0018
+#define UART_TFWR                      0x001C
+#define UART_RFWR                      0x0020
+#define UART_HCR                       0x0024
+
+#define UART_MREG                      0x0028
+#define UART_NREG                      0x002C
+#define UART_DREG                      0x0030
+#define UART_MNDREG                    0x0034
+#define UART_IRDA                      0x0038
+#define UART_MISR_MODE                 0x0040
+#define UART_MISR_RESET                        0x0044
+#define UART_MISR_EXPORT               0x0048
+#define UART_MISR_VAL                  0x004C
+#define UART_TEST_CTRL                 0x0050
+
+#define UART_SR                                0x0008
+#define UART_SR_HUNT_CHAR              BIT(7)
+#define UART_SR_RX_BREAK               BIT(6)
+#define UART_SR_PAR_FRAME_ERR          BIT(5)
+#define UART_SR_OVERRUN                        BIT(4)
+#define UART_SR_TX_EMPTY               BIT(3)
+#define UART_SR_TX_READY               BIT(2)
+#define UART_SR_RX_FULL                        BIT(1)
+#define UART_SR_RX_READY               BIT(0)
+
+#define UART_RF                                0x000C
+#define UARTDM_RF                      0x0070
+#define UART_MISR                      0x0010
+#define UART_ISR                       0x0014
+#define UART_ISR_TX_READY              BIT(7)
+
+#define UARTDM_RXFS                    0x50
+#define UARTDM_RXFS_BUF_SHIFT          0x7
+#define UARTDM_RXFS_BUF_MASK           0x7
+
+#define UARTDM_DMEN                    0x3C
+#define UARTDM_DMEN_RX_SC_ENABLE       BIT(5)
+#define UARTDM_DMEN_TX_SC_ENABLE       BIT(4)
+
+#define UARTDM_DMEN_TX_BAM_ENABLE      BIT(2)  /* UARTDM_1P4 */
+#define UARTDM_DMEN_TX_DM_ENABLE       BIT(0)  /* < UARTDM_1P4 */
+
+#define UARTDM_DMEN_RX_BAM_ENABLE      BIT(3)  /* UARTDM_1P4 */
+#define UARTDM_DMEN_RX_DM_ENABLE       BIT(1)  /* < UARTDM_1P4 */
+
+#define UARTDM_DMRX                    0x34
+#define UARTDM_NCF_TX                  0x40
+#define UARTDM_RX_TOTAL_SNAP           0x38
+
+#define UARTDM_BURST_SIZE              16   /* in bytes */
+#define UARTDM_TX_AIGN(x)              ((x) & ~0x3) /* valid for > 1p3 */
+#define UARTDM_TX_MAX                  256   /* in bytes, valid for <= 1p3 */
+#define UARTDM_RX_SIZE                 (UART_XMIT_SIZE / 4)
 
 enum {
        UARTDM_1P1 = 1,
@@ -78,10 +192,65 @@ struct msm_port {
        struct msm_dma          rx_dma;
 };
 
+#define UART_TO_MSM(uart_port) container_of(uart_port, struct msm_port, uart)
+
+static
+void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
+{
+       writel_relaxed(val, port->membase + off);
+}
+
+static
+unsigned int msm_read(struct uart_port *port, unsigned int off)
+{
+       return readl_relaxed(port->membase + off);
+}
+
+/*
+ * Setup the MND registers to use the TCXO clock.
+ */
+static void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
+{
+       msm_write(port, 0x06, UART_MREG);
+       msm_write(port, 0xF1, UART_NREG);
+       msm_write(port, 0x0F, UART_DREG);
+       msm_write(port, 0x1A, UART_MNDREG);
+       port->uartclk = 1843200;
+}
+
+/*
+ * Setup the MND registers to use the TCXO clock divided by 4.
+ */
+static void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
+{
+       msm_write(port, 0x18, UART_MREG);
+       msm_write(port, 0xF6, UART_NREG);
+       msm_write(port, 0x0F, UART_DREG);
+       msm_write(port, 0x0A, UART_MNDREG);
+       port->uartclk = 1843200;
+}
+
+static void msm_serial_set_mnd_regs(struct uart_port *port)
+{
+       struct msm_port *msm_port = UART_TO_MSM(port);
+
+       /*
+        * These registers don't exist so we change the clk input rate
+        * on uartdm hardware instead
+        */
+       if (msm_port->is_uartdm)
+               return;
+
+       if (port->uartclk == 19200000)
+               msm_serial_set_mnd_regs_tcxo(port);
+       else if (port->uartclk == 4800000)
+               msm_serial_set_mnd_regs_tcxoby4(port);
+}
+
 static void msm_handle_tx(struct uart_port *port);
 static void msm_start_rx_dma(struct msm_port *msm_port);
 
-void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
+static void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
 {
        struct device *dev = port->dev;
        unsigned int mapped;
@@ -388,10 +557,6 @@ static void msm_complete_rx_dma(void *args)
        val &= ~dma->enable_bit;
        msm_write(port, val, UARTDM_DMEN);
 
-       /* Restore interrupts */
-       msm_port->imr |= UART_IMR_RXLEV | UART_IMR_RXSTALE;
-       msm_write(port, msm_port->imr, UART_IMR);
-
        if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
                port->icount.overrun++;
                tty_insert_flip_char(tport, 0, TTY_OVERRUN);
@@ -726,7 +891,7 @@ static void msm_handle_tx(struct uart_port *port)
                return;
        }
 
-       pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
+       pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
        dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
 
        dma_min = 1;    /* Always DMA */
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
deleted file mode 100644 (file)
index 1786458..0000000
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (C) 2007 Google, Inc.
- * Author: Robert Love <rlove@google.com>
- * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __DRIVERS_SERIAL_MSM_SERIAL_H
-#define __DRIVERS_SERIAL_MSM_SERIAL_H
-
-#define UART_MR1                       0x0000
-
-#define UART_MR1_AUTO_RFR_LEVEL0       0x3F
-#define UART_MR1_AUTO_RFR_LEVEL1       0x3FF00
-#define UART_DM_MR1_AUTO_RFR_LEVEL1    0xFFFFFF00
-#define UART_MR1_RX_RDY_CTL            BIT(7)
-#define UART_MR1_CTS_CTL               BIT(6)
-
-#define UART_MR2                       0x0004
-#define UART_MR2_ERROR_MODE            BIT(6)
-#define UART_MR2_BITS_PER_CHAR         0x30
-#define UART_MR2_BITS_PER_CHAR_5       (0x0 << 4)
-#define UART_MR2_BITS_PER_CHAR_6       (0x1 << 4)
-#define UART_MR2_BITS_PER_CHAR_7       (0x2 << 4)
-#define UART_MR2_BITS_PER_CHAR_8       (0x3 << 4)
-#define UART_MR2_STOP_BIT_LEN_ONE      (0x1 << 2)
-#define UART_MR2_STOP_BIT_LEN_TWO      (0x3 << 2)
-#define UART_MR2_PARITY_MODE_NONE      0x0
-#define UART_MR2_PARITY_MODE_ODD       0x1
-#define UART_MR2_PARITY_MODE_EVEN      0x2
-#define UART_MR2_PARITY_MODE_SPACE     0x3
-#define UART_MR2_PARITY_MODE           0x3
-
-#define UART_CSR                       0x0008
-
-#define UART_TF                0x000C
-#define UARTDM_TF      0x0070
-
-#define UART_CR                                0x0010
-#define UART_CR_CMD_NULL               (0 << 4)
-#define UART_CR_CMD_RESET_RX           (1 << 4)
-#define UART_CR_CMD_RESET_TX           (2 << 4)
-#define UART_CR_CMD_RESET_ERR          (3 << 4)
-#define UART_CR_CMD_RESET_BREAK_INT    (4 << 4)
-#define UART_CR_CMD_START_BREAK                (5 << 4)
-#define UART_CR_CMD_STOP_BREAK         (6 << 4)
-#define UART_CR_CMD_RESET_CTS          (7 << 4)
-#define UART_CR_CMD_RESET_STALE_INT    (8 << 4)
-#define UART_CR_CMD_PACKET_MODE                (9 << 4)
-#define UART_CR_CMD_MODE_RESET         (12 << 4)
-#define UART_CR_CMD_SET_RFR            (13 << 4)
-#define UART_CR_CMD_RESET_RFR          (14 << 4)
-#define UART_CR_CMD_PROTECTION_EN      (16 << 4)
-#define UART_CR_CMD_STALE_EVENT_DISABLE        (6 << 8)
-#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
-#define UART_CR_CMD_FORCE_STALE                (4 << 8)
-#define UART_CR_CMD_RESET_TX_READY     (3 << 8)
-#define UART_CR_TX_DISABLE             BIT(3)
-#define UART_CR_TX_ENABLE              BIT(2)
-#define UART_CR_RX_DISABLE             BIT(1)
-#define UART_CR_RX_ENABLE              BIT(0)
-#define UART_CR_CMD_RESET_RXBREAK_START        ((1 << 11) | (2 << 4))
-
-#define UART_IMR               0x0014
-#define UART_IMR_TXLEV                 BIT(0)
-#define UART_IMR_RXSTALE               BIT(3)
-#define UART_IMR_RXLEV                 BIT(4)
-#define UART_IMR_DELTA_CTS             BIT(5)
-#define UART_IMR_CURRENT_CTS           BIT(6)
-#define UART_IMR_RXBREAK_START         BIT(10)
-
-#define UART_IPR_RXSTALE_LAST          0x20
-#define UART_IPR_STALE_LSB             0x1F
-#define UART_IPR_STALE_TIMEOUT_MSB     0x3FF80
-#define UART_DM_IPR_STALE_TIMEOUT_MSB  0xFFFFFF80
-
-#define UART_IPR       0x0018
-#define UART_TFWR      0x001C
-#define UART_RFWR      0x0020
-#define UART_HCR       0x0024
-
-#define UART_MREG              0x0028
-#define UART_NREG              0x002C
-#define UART_DREG              0x0030
-#define UART_MNDREG            0x0034
-#define UART_IRDA              0x0038
-#define UART_MISR_MODE         0x0040
-#define UART_MISR_RESET                0x0044
-#define UART_MISR_EXPORT       0x0048
-#define UART_MISR_VAL          0x004C
-#define UART_TEST_CTRL         0x0050
-
-#define UART_SR                        0x0008
-#define UART_SR_HUNT_CHAR      BIT(7)
-#define UART_SR_RX_BREAK       BIT(6)
-#define UART_SR_PAR_FRAME_ERR  BIT(5)
-#define UART_SR_OVERRUN                BIT(4)
-#define UART_SR_TX_EMPTY       BIT(3)
-#define UART_SR_TX_READY       BIT(2)
-#define UART_SR_RX_FULL                BIT(1)
-#define UART_SR_RX_READY       BIT(0)
-
-#define UART_RF                        0x000C
-#define UARTDM_RF              0x0070
-#define UART_MISR              0x0010
-#define UART_ISR               0x0014
-#define UART_ISR_TX_READY      BIT(7)
-
-#define UARTDM_RXFS            0x50
-#define UARTDM_RXFS_BUF_SHIFT  0x7
-#define UARTDM_RXFS_BUF_MASK   0x7
-
-#define UARTDM_DMEN            0x3C
-#define UARTDM_DMEN_RX_SC_ENABLE BIT(5)
-#define UARTDM_DMEN_TX_SC_ENABLE BIT(4)
-
-#define UARTDM_DMEN_TX_BAM_ENABLE BIT(2)       /* UARTDM_1P4 */
-#define UARTDM_DMEN_TX_DM_ENABLE  BIT(0)       /* < UARTDM_1P4 */
-
-#define UARTDM_DMEN_RX_BAM_ENABLE BIT(3)       /* UARTDM_1P4 */
-#define UARTDM_DMEN_RX_DM_ENABLE  BIT(1)       /* < UARTDM_1P4 */
-
-#define UARTDM_DMRX            0x34
-#define UARTDM_NCF_TX          0x40
-#define UARTDM_RX_TOTAL_SNAP   0x38
-
-#define UART_TO_MSM(uart_port) ((struct msm_port *) uart_port)
-
-static inline
-void msm_write(struct uart_port *port, unsigned int val, unsigned int off)
-{
-       writel_relaxed(val, port->membase + off);
-}
-
-static inline
-unsigned int msm_read(struct uart_port *port, unsigned int off)
-{
-       return readl_relaxed(port->membase + off);
-}
-
-/*
- * Setup the MND registers to use the TCXO clock.
- */
-static inline void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
-{
-       msm_write(port, 0x06, UART_MREG);
-       msm_write(port, 0xF1, UART_NREG);
-       msm_write(port, 0x0F, UART_DREG);
-       msm_write(port, 0x1A, UART_MNDREG);
-       port->uartclk = 1843200;
-}
-
-/*
- * Setup the MND registers to use the TCXO clock divided by 4.
- */
-static inline void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
-{
-       msm_write(port, 0x18, UART_MREG);
-       msm_write(port, 0xF6, UART_NREG);
-       msm_write(port, 0x0F, UART_DREG);
-       msm_write(port, 0x0A, UART_MNDREG);
-       port->uartclk = 1843200;
-}
-
-static inline
-void msm_serial_set_mnd_regs_from_uartclk(struct uart_port *port)
-{
-       if (port->uartclk == 19200000)
-               msm_serial_set_mnd_regs_tcxo(port);
-       else if (port->uartclk == 4800000)
-               msm_serial_set_mnd_regs_tcxoby4(port);
-}
-
-#define msm_serial_set_mnd_regs msm_serial_set_mnd_regs_from_uartclk
-
-#endif /* __DRIVERS_SERIAL_MSM_SERIAL_H */
index ce362bd51de78d65177c889541e80d33aae1779d..45b57c294d13b19b8d47f2e8b2292f68c8167485 100644 (file)
@@ -300,6 +300,8 @@ static int mvebu_uart_startup(struct uart_port *port)
 static void mvebu_uart_shutdown(struct uart_port *port)
 {
        writel(0, port->membase + UART_CTRL);
+
+       free_irq(port->irq, port);
 }
 
 static void mvebu_uart_set_termios(struct uart_port *port,
index 62a43bf5698e5e3c335933ade8659eaa05d1a709..7f8e99bbcb739832824682ebe7321c7c653a22da 100644 (file)
@@ -445,7 +445,6 @@ static int pic32_uart_startup(struct uart_port *port)
                                       sport->idx);
        if (!sport->irq_rx_name) {
                dev_err(port->dev, "%s: kasprintf err!", __func__);
-               kfree(sport->irq_fault_name);
                ret = -ENOMEM;
                goto out_f;
        }
index e156e39d620cebd2ee48304ed80223fd7c96a2c2..b24b0556f5a853c7ac742620a4bfafcb918f9af9 100644 (file)
@@ -1720,7 +1720,7 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
 
        r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(uap->pdev, 0);
-       if (!r_ports || !irq)
+       if (!r_ports || irq <= 0)
                return -ENODEV;
 
        uap->port.mapbase  = r_ports->start;
index 41eab75ba2af89955f90d3802992e0dfb50383c3..cd9d9e87847555ee486e4e0d8ff85ceab5349a7e 100644 (file)
@@ -27,7 +27,6 @@
 #define SUPPORT_SYSRQ
 #endif
 
-#include <linux/module.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/console.h>
@@ -829,7 +828,6 @@ static const struct of_device_id serial_pxa_dt_ids[] = {
        { .compatible = "mrvl,mmp-uart", },
        {}
 };
-MODULE_DEVICE_TABLE(of, serial_pxa_dt_ids);
 
 static int serial_pxa_probe_dt(struct platform_device *pdev,
                               struct uart_pxa_port *sport)
@@ -914,28 +912,15 @@ static int serial_pxa_probe(struct platform_device *dev)
        return ret;
 }
 
-static int serial_pxa_remove(struct platform_device *dev)
-{
-       struct uart_pxa_port *sport = platform_get_drvdata(dev);
-
-       uart_remove_one_port(&serial_pxa_reg, &sport->port);
-
-       clk_unprepare(sport->clk);
-       clk_put(sport->clk);
-       kfree(sport);
-
-       return 0;
-}
-
 static struct platform_driver serial_pxa_driver = {
         .probe          = serial_pxa_probe,
-        .remove         = serial_pxa_remove,
 
        .driver         = {
                .name   = "pxa2xx-uart",
 #ifdef CONFIG_PM
                .pm     = &serial_pxa_pm_ops,
 #endif
+               .suppress_bind_attrs = true,
                .of_match_table = serial_pxa_dt_ids,
        },
 };
@@ -954,15 +939,4 @@ static int __init serial_pxa_init(void)
 
        return ret;
 }
-
-static void __exit serial_pxa_exit(void)
-{
-       platform_driver_unregister(&serial_pxa_driver);
-       uart_unregister_driver(&serial_pxa_reg);
-}
-
-module_init(serial_pxa_init);
-module_exit(serial_pxa_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pxa2xx-uart");
+device_initcall(serial_pxa_init);
index 99bb23161dd6e5f3ac514f9a9218cd4590467169..ae2095a66708e0b06bf05bfe38d4a0901b158d3d 100644 (file)
@@ -169,8 +169,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)
                return;
 
        if (s3c24xx_serial_has_interrupt_mask(port))
-               __set_bit(S3C64XX_UINTM_TXD,
-                       portaddrl(port, S3C64XX_UINTM));
+               s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);
        else
                disable_irq_nosync(ourport->tx_irq);
 
@@ -235,8 +234,7 @@ static void enable_tx_dma(struct s3c24xx_uart_port *ourport)
 
        /* Mask Tx interrupt */
        if (s3c24xx_serial_has_interrupt_mask(port))
-               __set_bit(S3C64XX_UINTM_TXD,
-                         portaddrl(port, S3C64XX_UINTM));
+               s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);
        else
                disable_irq_nosync(ourport->tx_irq);
 
@@ -269,8 +267,8 @@ static void enable_tx_pio(struct s3c24xx_uart_port *ourport)
 
        /* Unmask Tx interrupt */
        if (s3c24xx_serial_has_interrupt_mask(port))
-               __clear_bit(S3C64XX_UINTM_TXD,
-                           portaddrl(port, S3C64XX_UINTM));
+               s3c24xx_clear_bit(port, S3C64XX_UINTM_TXD,
+                                 S3C64XX_UINTM);
        else
                enable_irq(ourport->tx_irq);
 
@@ -397,8 +395,8 @@ static void s3c24xx_serial_stop_rx(struct uart_port *port)
        if (rx_enabled(port)) {
                dbg("s3c24xx_serial_stop_rx: port=%p\n", port);
                if (s3c24xx_serial_has_interrupt_mask(port))
-                       __set_bit(S3C64XX_UINTM_RXD,
-                               portaddrl(port, S3C64XX_UINTM));
+                       s3c24xx_set_bit(port, S3C64XX_UINTM_RXD,
+                                       S3C64XX_UINTM);
                else
                        disable_irq_nosync(ourport->rx_irq);
                rx_enabled(port) = 0;
@@ -1069,7 +1067,7 @@ static int s3c64xx_serial_startup(struct uart_port *port)
        spin_unlock_irqrestore(&port->lock, flags);
 
        /* Enable Rx Interrupt */
-       __clear_bit(S3C64XX_UINTM_RXD, portaddrl(port, S3C64XX_UINTM));
+       s3c24xx_clear_bit(port, S3C64XX_UINTM_RXD, S3C64XX_UINTM);
 
        dbg("s3c64xx_serial_startup ok\n");
        return ret;
@@ -1684,7 +1682,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
                return -ENODEV;
 
        if (port->mapbase != 0)
-               return 0;
+               return -EINVAL;
 
        /* setup info for port */
        port->dev       = &platdev->dev;
@@ -1738,22 +1736,25 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
                ourport->dma = devm_kzalloc(port->dev,
                                            sizeof(*ourport->dma),
                                            GFP_KERNEL);
-               if (!ourport->dma)
-                       return -ENOMEM;
+               if (!ourport->dma) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
        }
 
        ourport->clk    = clk_get(&platdev->dev, "uart");
        if (IS_ERR(ourport->clk)) {
                pr_err("%s: Controller clock not found\n",
                                dev_name(&platdev->dev));
-               return PTR_ERR(ourport->clk);
+               ret = PTR_ERR(ourport->clk);
+               goto err;
        }
 
        ret = clk_prepare_enable(ourport->clk);
        if (ret) {
                pr_err("uart: clock failed to prepare+enable: %d\n", ret);
                clk_put(ourport->clk);
-               return ret;
+               goto err;
        }
 
        /* Keep all interrupts masked and cleared */
@@ -1769,7 +1770,12 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
 
        /* reset the fifos (and setup the uart) */
        s3c24xx_serial_resetport(port, cfg);
+
        return 0;
+
+err:
+       port->mapbase = 0;
+       return ret;
 }
 
 /* Device driver serial port probe */
@@ -1836,8 +1842,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
        ourport->min_dma_size = max_t(int, ourport->port.fifosize,
                                    dma_get_cache_alignment());
 
-       probe_index++;
-
        dbg("%s: initialising port %p...\n", __func__, ourport);
 
        ret = s3c24xx_serial_init_port(ourport, pdev);
@@ -1867,6 +1871,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
        if (ret < 0)
                dev_err(&pdev->dev, "failed to add cpufreq notifier\n");
 
+       probe_index++;
+
        return 0;
 }
 
index fc5deaa4f382def3e5d1bfe69497302a6c3a4abb..2ae4fcee181458ce008c499b266e1e7ecd3d614f 100644 (file)
@@ -117,10 +117,38 @@ struct s3c24xx_uart_port {
 #define portaddrl(port, reg) \
        ((unsigned long *)(unsigned long)((port)->membase + (reg)))
 
-#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
-#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
-
-#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
-#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
+#define rd_regb(port, reg) (readb_relaxed(portaddr(port, reg)))
+#define rd_regl(port, reg) (readl_relaxed(portaddr(port, reg)))
+
+#define wr_regb(port, reg, val) writeb_relaxed(val, portaddr(port, reg))
+#define wr_regl(port, reg, val) writel_relaxed(val, portaddr(port, reg))
+
+/* Byte-order aware bit setting/clearing functions. */
+
+static inline void s3c24xx_set_bit(struct uart_port *port, int idx,
+                                  unsigned int reg)
+{
+       unsigned long flags;
+       u32 val;
+
+       local_irq_save(flags);
+       val = rd_regl(port, reg);
+       val |= (1 << idx);
+       wr_regl(port, reg, val);
+       local_irq_restore(flags);
+}
+
+static inline void s3c24xx_clear_bit(struct uart_port *port, int idx,
+                                    unsigned int reg)
+{
+       unsigned long flags;
+       u32 val;
+
+       local_irq_save(flags);
+       val = rd_regl(port, reg);
+       val &= ~(1 << idx);
+       wr_regl(port, reg, val);
+       local_irq_restore(flags);
+}
 
 #endif
index 1dba6719db8ddef3f3b88812d584822577c2cbb4..731ac35acb3125a2dcee3a6dbeaee93fb2963de2 100644 (file)
@@ -1317,7 +1317,12 @@ static int tegra_uart_probe(struct platform_device *pdev)
        }
 
        u->iotype = UPIO_MEM32;
-       u->irq = platform_get_irq(pdev, 0);
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Couldn't get IRQ\n");
+               return ret;
+       }
+       u->irq = ret;
        u->regshift = 2;
        ret = uart_add_one_port(&tegra_uart_driver, u);
        if (ret < 0) {
index a333c59cba2cf3fdc37d81dda232c1f7474660f5..9fc15335c8c59b70c93e8e6a1026b32da59359e5 100644 (file)
@@ -887,7 +887,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
                /*
                 * Free and release old regions
                 */
-               if (old_type != PORT_UNKNOWN)
+               if (old_type != PORT_UNKNOWN && uport->ops->release_port)
                        uport->ops->release_port(uport);
 
                uport->iobase = new_port;
@@ -900,7 +900,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
                /*
                 * Claim and map the new regions
                 */
-               if (uport->type != PORT_UNKNOWN) {
+               if (uport->type != PORT_UNKNOWN && uport->ops->request_port) {
                        retval = uport->ops->request_port(uport);
                } else {
                        /* Always success - Jean II */
@@ -1125,7 +1125,7 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
                 * If we already have a port type configured,
                 * we must release its resources.
                 */
-               if (uport->type != PORT_UNKNOWN)
+               if (uport->type != PORT_UNKNOWN && uport->ops->release_port)
                        uport->ops->release_port(uport);
 
                flags = UART_CONFIG_TYPE;
@@ -2897,7 +2897,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
        /*
         * Free the port IO and memory resources, if any.
         */
-       if (uport->type != PORT_UNKNOWN)
+       if (uport->type != PORT_UNKNOWN && uport->ops->release_port)
                uport->ops->release_port(uport);
        kfree(uport->tty_groups);
 
index e8dd5097dc5674d928a3f0f8c70658f737bb9aaa..d2da6aa7f27d0eb1ccb4c874711446e3952e5367 100644 (file)
@@ -52,6 +52,9 @@ void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl)
        int value_array[UART_GPIO_MAX];
        unsigned int count = 0;
 
+       if (gpios == NULL)
+               return;
+
        for (i = 0; i < UART_GPIO_MAX; i++)
                if (gpios->gpio[i] && mctrl_gpios_desc[i].dir_out) {
                        desc_array[count] = gpios->gpio[i];
@@ -73,6 +76,9 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
 {
        enum mctrl_gpio_idx i;
 
+       if (gpios == NULL)
+               return *mctrl;
+
        for (i = 0; i < UART_GPIO_MAX; i++) {
                if (gpios->gpio[i] && !mctrl_gpios_desc[i].dir_out) {
                        if (gpiod_get_value(gpios->gpio[i]))
@@ -86,6 +92,27 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
 }
 EXPORT_SYMBOL_GPL(mctrl_gpio_get);
 
+unsigned int
+mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl)
+{
+       enum mctrl_gpio_idx i;
+
+       if (gpios == NULL)
+               return *mctrl;
+
+       for (i = 0; i < UART_GPIO_MAX; i++) {
+               if (gpios->gpio[i] && mctrl_gpios_desc[i].dir_out) {
+                       if (gpiod_get_value(gpios->gpio[i]))
+                               *mctrl |= mctrl_gpios_desc[i].mctrl;
+                       else
+                               *mctrl &= ~mctrl_gpios_desc[i].mctrl;
+               }
+       }
+
+       return *mctrl;
+}
+EXPORT_SYMBOL_GPL(mctrl_gpio_get_outputs);
+
 struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
 {
        struct mctrl_gpios *gpios;
@@ -203,6 +230,9 @@ void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
 {
        enum mctrl_gpio_idx i;
 
+       if (gpios == NULL)
+               return;
+
        for (i = 0; i < UART_GPIO_MAX; i++) {
                if (gpios->irq[i])
                        devm_free_irq(gpios->port->dev, gpios->irq[i], gpios);
@@ -218,6 +248,9 @@ void mctrl_gpio_enable_ms(struct mctrl_gpios *gpios)
 {
        enum mctrl_gpio_idx i;
 
+       if (gpios == NULL)
+               return;
+
        /* .enable_ms may be called multiple times */
        if (gpios->mctrl_on)
                return;
@@ -240,6 +273,9 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
 {
        enum mctrl_gpio_idx i;
 
+       if (gpios == NULL)
+               return;
+
        if (!gpios->mctrl_on)
                return;
 
index 332a33ab0647cbe3fe5d9b9b13dd1b8b78eb0b84..fa000bcff217004d29d05f5b943329fc07e0ca53 100644 (file)
@@ -48,11 +48,18 @@ struct mctrl_gpios;
 void mctrl_gpio_set(struct mctrl_gpios *gpios, unsigned int mctrl);
 
 /*
- * Get state of the modem control output lines from GPIOs.
+ * Get state of the modem control input lines from GPIOs.
  * The mctrl flags are updated and returned.
  */
 unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl);
 
+/*
+ * Get state of the modem control output lines from GPIOs.
+ * The mctrl flags are updated and returned.
+ */
+unsigned int
+mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl);
+
 /*
  * Returns the associated struct gpio_desc to the modem line gidx
  */
@@ -107,6 +114,12 @@ unsigned int mctrl_gpio_get(struct mctrl_gpios *gpios, unsigned int *mctrl)
        return *mctrl;
 }
 
+static inline unsigned int
+mctrl_gpio_get_outputs(struct mctrl_gpios *gpios, unsigned int *mctrl)
+{
+       return *mctrl;
+}
+
 static inline
 struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
                                      enum mctrl_gpio_idx gidx)
index 0130feb069aee02fbf5beeecacd9f674cde2e2ff..d86eee38aae6eba5953f0d7e4e5b2021bc6ee9f8 100644 (file)
@@ -57,6 +57,7 @@
 #include <asm/sh_bios.h>
 #endif
 
+#include "serial_mctrl_gpio.h"
 #include "sh-sci.h"
 
 /* Offsets into the sci_port->irqs array */
@@ -111,6 +112,7 @@ struct sci_port {
        unsigned int            error_clear;
        unsigned int            sampling_rate_mask;
        resource_size_t         reg_size;
+       struct mctrl_gpios      *gpios;
 
        /* Break timer */
        struct timer_list       break_timer;
@@ -139,6 +141,8 @@ struct sci_port {
        struct timer_list               rx_timer;
        unsigned int                    rx_timeout;
 #endif
+
+       bool autorts;
 };
 
 #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
@@ -701,7 +705,6 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
 static void sci_init_pins(struct uart_port *port, unsigned int cflag)
 {
        struct sci_port *s = to_sci_port(port);
-       const struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
 
        /*
         * Use port-specific handler if provided.
@@ -711,21 +714,28 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
                return;
        }
 
-       /*
-        * For the generic path SCSPTR is necessary. Bail out if that's
-        * unavailable, too.
-        */
-       if (!reg->size)
-               return;
-
-       if ((s->cfg->capabilities & SCIx_HAVE_RTSCTS) &&
-           ((!(cflag & CRTSCTS)))) {
-               unsigned short status;
+       if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+               u16 ctrl = serial_port_in(port, SCPCR);
+
+               /* Enable RXD and TXD pin functions */
+               ctrl &= ~(SCPCR_RXDC | SCPCR_TXDC);
+               if (to_sci_port(port)->cfg->capabilities & SCIx_HAVE_RTSCTS) {
+                       /* RTS# is output, driven 1 */
+                       ctrl |= SCPCR_RTSC;
+                       serial_port_out(port, SCPDR,
+                               serial_port_in(port, SCPDR) | SCPDR_RTSD);
+                       /* Enable CTS# pin function */
+                       ctrl &= ~SCPCR_CTSC;
+               }
+               serial_port_out(port, SCPCR, ctrl);
+       } else if (sci_getreg(port, SCSPTR)->size) {
+               u16 status = serial_port_in(port, SCSPTR);
 
-               status = serial_port_in(port, SCSPTR);
-               status &= ~SCSPTR_CTSIO;
-               status |= SCSPTR_RTSIO;
-               serial_port_out(port, SCSPTR, status); /* Set RTS = 1 */
+               /* RTS# is output, driven 1 */
+               status |= SCSPTR_RTSIO | SCSPTR_RTSDT;
+               /* CTS# and SCK are inputs */
+               status &= ~(SCSPTR_CTSIO | SCSPTR_SCKIO);
+               serial_port_out(port, SCSPTR, status);
        }
 }
 
@@ -1803,6 +1813,46 @@ static unsigned int sci_tx_empty(struct uart_port *port)
        return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
 }
 
+static void sci_set_rts(struct uart_port *port, bool state)
+{
+       if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+               u16 data = serial_port_in(port, SCPDR);
+
+               /* Active low */
+               if (state)
+                       data &= ~SCPDR_RTSD;
+               else
+                       data |= SCPDR_RTSD;
+               serial_port_out(port, SCPDR, data);
+
+               /* RTS# is output */
+               serial_port_out(port, SCPCR,
+                               serial_port_in(port, SCPCR) | SCPCR_RTSC);
+       } else if (sci_getreg(port, SCSPTR)->size) {
+               u16 ctrl = serial_port_in(port, SCSPTR);
+
+               /* Active low */
+               if (state)
+                       ctrl &= ~SCSPTR_RTSDT;
+               else
+                       ctrl |= SCSPTR_RTSDT;
+               serial_port_out(port, SCSPTR, ctrl);
+       }
+}
+
+static bool sci_get_cts(struct uart_port *port)
+{
+       if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+               /* Active low */
+               return !(serial_port_in(port, SCPDR) & SCPDR_CTSD);
+       } else if (sci_getreg(port, SCSPTR)->size) {
+               /* Active low */
+               return !(serial_port_in(port, SCSPTR) & SCSPTR_CTSDT);
+       }
+
+       return true;
+}
+
 /*
  * Modem control is a bit of a mixed bag for SCI(F) ports. Generally
  * CTS/RTS is supported in hardware by at least one port and controlled
@@ -1817,6 +1867,8 @@ static unsigned int sci_tx_empty(struct uart_port *port)
  */
 static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
+       struct sci_port *s = to_sci_port(port);
+
        if (mctrl & TIOCM_LOOP) {
                const struct plat_sci_reg *reg;
 
@@ -1829,25 +1881,72 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
                                        serial_port_in(port, SCFCR) |
                                        SCFCR_LOOP);
        }
+
+       mctrl_gpio_set(s->gpios, mctrl);
+
+       if (!(s->cfg->capabilities & SCIx_HAVE_RTSCTS))
+               return;
+
+       if (!(mctrl & TIOCM_RTS)) {
+               /* Disable Auto RTS */
+               serial_port_out(port, SCFCR,
+                               serial_port_in(port, SCFCR) & ~SCFCR_MCE);
+
+               /* Clear RTS */
+               sci_set_rts(port, 0);
+       } else if (s->autorts) {
+               if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
+                       /* Enable RTS# pin function */
+                       serial_port_out(port, SCPCR,
+                               serial_port_in(port, SCPCR) & ~SCPCR_RTSC);
+               }
+
+               /* Enable Auto RTS */
+               serial_port_out(port, SCFCR,
+                               serial_port_in(port, SCFCR) | SCFCR_MCE);
+       } else {
+               /* Set RTS */
+               sci_set_rts(port, 1);
+       }
 }
 
 static unsigned int sci_get_mctrl(struct uart_port *port)
 {
+       struct sci_port *s = to_sci_port(port);
+       struct mctrl_gpios *gpios = s->gpios;
+       unsigned int mctrl = 0;
+
+       mctrl_gpio_get(gpios, &mctrl);
+
        /*
         * CTS/RTS is handled in hardware when supported, while nothing
-        * else is wired up. Keep it simple and simply assert DSR/CAR.
+        * else is wired up.
         */
-       return TIOCM_DSR | TIOCM_CAR;
+       if (s->autorts) {
+               if (sci_get_cts(port))
+                       mctrl |= TIOCM_CTS;
+       } else if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_CTS))) {
+               mctrl |= TIOCM_CTS;
+       }
+       if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DSR)))
+               mctrl |= TIOCM_DSR;
+       if (IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(gpios, UART_GPIO_DCD)))
+               mctrl |= TIOCM_CAR;
+
+       return mctrl;
+}
+
+static void sci_enable_ms(struct uart_port *port)
+{
+       mctrl_gpio_enable_ms(to_sci_port(port)->gpios);
 }
 
 static void sci_break_ctl(struct uart_port *port, int break_state)
 {
-       struct sci_port *s = to_sci_port(port);
-       const struct plat_sci_reg *reg = sci_regmap[s->cfg->regtype] + SCSPTR;
        unsigned short scscr, scsptr;
 
        /* check wheter the port has SCSPTR */
-       if (!reg->size) {
+       if (!sci_getreg(port, SCSPTR)->size) {
                /*
                 * Not supported by hardware. Most parts couple break and rx
                 * interrupts together, with break detection always enabled.
@@ -1873,7 +1972,6 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
 static int sci_startup(struct uart_port *port)
 {
        struct sci_port *s = to_sci_port(port);
-       unsigned long flags;
        int ret;
 
        dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
@@ -1884,11 +1982,6 @@ static int sci_startup(struct uart_port *port)
 
        sci_request_dma(port);
 
-       spin_lock_irqsave(&port->lock, flags);
-       sci_start_tx(port);
-       sci_start_rx(port);
-       spin_unlock_irqrestore(&port->lock, flags);
-
        return 0;
 }
 
@@ -1896,12 +1989,19 @@ static void sci_shutdown(struct uart_port *port)
 {
        struct sci_port *s = to_sci_port(port);
        unsigned long flags;
+       u16 scr;
 
        dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
 
+       s->autorts = false;
+       mctrl_gpio_disable_ms(to_sci_port(port)->gpios);
+
        spin_lock_irqsave(&port->lock, flags);
        sci_stop_rx(port);
        sci_stop_tx(port);
+       /* Stop RX and TX, disable related interrupts, keep clock source */
+       scr = serial_port_in(port, SCSCR);
+       serial_port_out(port, SCSCR, scr & (SCSCR_CKE1 | SCSCR_CKE0));
        spin_unlock_irqrestore(&port->lock, flags);
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -2056,6 +2156,15 @@ static void sci_reset(struct uart_port *port)
        reg = sci_getreg(port, SCFCR);
        if (reg->size)
                serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+
+       sci_clear_SCxSR(port,
+                       SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) &
+                       SCxSR_BREAK_CLEAR(port));
+       if (sci_getreg(port, SCLSR)->size) {
+               status = serial_port_in(port, SCLSR);
+               status &= ~(SCLSR_TO | SCLSR_ORER);
+               serial_port_out(port, SCLSR, status);
+       }
 }
 
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
@@ -2218,15 +2327,18 @@ done:
 
        sci_init_pins(port, termios->c_cflag);
 
+       port->status &= ~UPSTAT_AUTOCTS;
+       s->autorts = false;
        reg = sci_getreg(port, SCFCR);
        if (reg->size) {
                unsigned short ctrl = serial_port_in(port, SCFCR);
 
-               if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
-                       if (termios->c_cflag & CRTSCTS)
-                               ctrl |= SCFCR_MCE;
-                       else
-                               ctrl &= ~SCFCR_MCE;
+               if ((port->flags & UPF_HARD_FLOW) &&
+                   (termios->c_cflag & CRTSCTS)) {
+                       /* There is no CTS interrupt to restart the hardware */
+                       port->status |= UPSTAT_AUTOCTS;
+                       /* MCE is enabled when RTS is raised */
+                       s->autorts = true;
                }
 
                /*
@@ -2300,6 +2412,9 @@ done:
                sci_start_rx(port);
 
        sci_port_disable(s);
+
+       if (UART_ENABLE_MS(port, termios->c_cflag))
+               sci_enable_ms(port);
 }
 
 static void sci_pm(struct uart_port *port, unsigned int state,
@@ -2425,6 +2540,7 @@ static struct uart_ops sci_uart_ops = {
        .start_tx       = sci_start_tx,
        .stop_tx        = sci_stop_tx,
        .stop_rx        = sci_stop_rx,
+       .enable_ms      = sci_enable_ms,
        .break_ctl      = sci_break_ctl,
        .startup        = sci_startup,
        .shutdown       = sci_shutdown,
@@ -2890,6 +3006,9 @@ sci_parse_dt(struct platform_device *pdev, unsigned int *dev_id)
        p->regtype = SCI_OF_REGTYPE(match->data);
        p->scscr = SCSCR_RE | SCSCR_TE;
 
+       if (of_find_property(np, "uart-has-rtscts", NULL))
+               p->capabilities |= SCIx_HAVE_RTSCTS;
+
        return p;
 }
 
@@ -2912,6 +3031,21 @@ static int sci_probe_single(struct platform_device *dev,
        if (ret)
                return ret;
 
+       sciport->gpios = mctrl_gpio_init(&sciport->port, 0);
+       if (IS_ERR(sciport->gpios) && PTR_ERR(sciport->gpios) != -ENOSYS)
+               return PTR_ERR(sciport->gpios);
+
+       if (p->capabilities & SCIx_HAVE_RTSCTS) {
+               if (!IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
+                                                       UART_GPIO_CTS)) ||
+                   !IS_ERR_OR_NULL(mctrl_gpio_to_gpiod(sciport->gpios,
+                                                       UART_GPIO_RTS))) {
+                       dev_err(&dev->dev, "Conflicting RTS/CTS config\n");
+                       return -EINVAL;
+               }
+               sciport->port.flags |= UPF_HARD_FLOW;
+       }
+
        ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
        if (ret) {
                sci_cleanup_single(sciport);
index 7a4fa185b93ef30792d58a24d7f26dbb2ec0156e..ffa6d688c335f252c453e821f246b4bbe357ea88 100644 (file)
@@ -105,13 +105,16 @@ enum {
 #define SCFCR_LOOP     BIT(0)  /* Loopback Test */
 
 /* SCLSR (Line Status Register) on (H)SCIF */
+#define SCLSR_TO       BIT(2)  /* Timeout */
 #define SCLSR_ORER     BIT(0)  /* Overrun Error */
 
 /* SCSPTR (Serial Port Register), optional */
-#define SCSPTR_RTSIO   BIT(7)  /* Serial Port RTS Pin Input/Output */
-#define SCSPTR_RTSDT   BIT(6)  /* Serial Port RTS Pin Data */
-#define SCSPTR_CTSIO   BIT(5)  /* Serial Port CTS Pin Input/Output */
-#define SCSPTR_CTSDT   BIT(4)  /* Serial Port CTS Pin Data */
+#define SCSPTR_RTSIO   BIT(7)  /* Serial Port RTS# Pin Input/Output */
+#define SCSPTR_RTSDT   BIT(6)  /* Serial Port RTS# Pin Data */
+#define SCSPTR_CTSIO   BIT(5)  /* Serial Port CTS# Pin Input/Output */
+#define SCSPTR_CTSDT   BIT(4)  /* Serial Port CTS# Pin Data */
+#define SCSPTR_SCKIO   BIT(3)  /* Serial Port Clock Pin Input/Output */
+#define SCSPTR_SCKDT   BIT(2)  /* Serial Port Clock Pin Data */
 #define SCSPTR_SPB2IO  BIT(1)  /* Serial Port Break Input/Output */
 #define SCSPTR_SPB2DT  BIT(0)  /* Serial Port Break Data */
 
@@ -119,12 +122,18 @@ enum {
 #define HSCIF_SRE      BIT(15) /* Sampling Rate Register Enable */
 
 /* SCPCR (Serial Port Control Register), SCIFA/SCIFB only */
-#define SCPCR_RTSC     BIT(4)  /* Serial Port RTS Pin / Output Pin */
-#define SCPCR_CTSC     BIT(3)  /* Serial Port CTS Pin / Input Pin */
+#define SCPCR_RTSC     BIT(4)  /* Serial Port RTS# Pin / Output Pin */
+#define SCPCR_CTSC     BIT(3)  /* Serial Port CTS# Pin / Input Pin */
+#define SCPCR_SCKC     BIT(2)  /* Serial Port SCK Pin / Output Pin */
+#define SCPCR_RXDC     BIT(1)  /* Serial Port RXD Pin / Input Pin */
+#define SCPCR_TXDC     BIT(0)  /* Serial Port TXD Pin / Output Pin */
 
 /* SCPDR (Serial Port Data Register), SCIFA/SCIFB only */
-#define SCPDR_RTSD     BIT(4)  /* Serial Port RTS Output Pin Data */
-#define SCPDR_CTSD     BIT(3)  /* Serial Port CTS Input Pin Data */
+#define SCPDR_RTSD     BIT(4)  /* Serial Port RTS# Output Pin Data */
+#define SCPDR_CTSD     BIT(3)  /* Serial Port CTS# Input Pin Data */
+#define SCPDR_SCKD     BIT(2)  /* Serial Port SCK Output Pin Data */
+#define SCPDR_RXDD     BIT(1)  /* Serial Port RXD Input Pin Data */
+#define SCPDR_TXDD     BIT(0)  /* Serial Port TXD Output Pin Data */
 
 /*
  * BRG Clock Select Register (Some SCIF and HSCIF)
index c3a885b4d76a73b41a3ed94c0abf920c012b88af..43756bd9111c701e4d91a3b9d5e7a7f35c08d546 100644 (file)
@@ -106,7 +106,7 @@ struct sirfsoc_uart_register {
        enum sirfsoc_uart_type uart_type;
 };
 
-u32 uart_usp_ff_full_mask(struct uart_port *port)
+static u32 uart_usp_ff_full_mask(struct uart_port *port)
 {
        u32 full_bit;
 
@@ -114,7 +114,7 @@ u32 uart_usp_ff_full_mask(struct uart_port *port)
        return (1 << full_bit);
 }
 
-u32 uart_usp_ff_empty_mask(struct uart_port *port)
+static u32 uart_usp_ff_empty_mask(struct uart_port *port)
 {
        u32 empty_bit;
 
index b384060e3b1ff3c0c290133d1a0625e05c00d91b..23cfc5e16b45b5200f9c29e5f12b6da3c73e4e62 100644 (file)
@@ -21,7 +21,6 @@
 
 #include <linux/hrtimer.h>
 #include <linux/delay.h>
-#include <linux/module.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/irq.h>
@@ -730,22 +729,12 @@ static int vt8500_serial_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int vt8500_serial_remove(struct platform_device *pdev)
-{
-       struct vt8500_port *vt8500_port = platform_get_drvdata(pdev);
-
-       clk_disable_unprepare(vt8500_port->clk);
-       uart_remove_one_port(&vt8500_uart_driver, &vt8500_port->uart);
-
-       return 0;
-}
-
 static struct platform_driver vt8500_platform_driver = {
        .probe  = vt8500_serial_probe,
-       .remove = vt8500_serial_remove,
        .driver = {
                .name = "vt8500_serial",
                .of_match_table = wmt_dt_ids,
+               .suppress_bind_attrs = true,
        },
 };
 
@@ -764,19 +753,4 @@ static int __init vt8500_serial_init(void)
 
        return ret;
 }
-
-static void __exit vt8500_serial_exit(void)
-{
-#ifdef CONFIG_SERIAL_VT8500_CONSOLE
-       unregister_console(&vt8500_console);
-#endif
-       platform_driver_unregister(&vt8500_platform_driver);
-       uart_unregister_driver(&vt8500_uart_driver);
-}
-
-module_init(vt8500_serial_init);
-module_exit(vt8500_serial_exit);
-
-MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
-MODULE_DESCRIPTION("Driver for vt8500 serial device");
-MODULE_LICENSE("GPL v2");
+device_initcall(vt8500_serial_init);
index cd46e64c4255559580ce7adf279300902f504975..9ca1a4d1b66a1e02186a438af498a4ac2ec5ea29 100644 (file)
@@ -976,6 +976,23 @@ static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
 }
 #endif
 
+static void cdns_uart_pm(struct uart_port *port, unsigned int state,
+                  unsigned int oldstate)
+{
+       struct cdns_uart *cdns_uart = port->private_data;
+
+       switch (state) {
+       case UART_PM_STATE_OFF:
+               clk_disable(cdns_uart->uartclk);
+               clk_disable(cdns_uart->pclk);
+               break;
+       default:
+               clk_enable(cdns_uart->pclk);
+               clk_enable(cdns_uart->uartclk);
+               break;
+       }
+}
+
 static struct uart_ops cdns_uart_ops = {
        .set_mctrl      = cdns_uart_set_mctrl,
        .get_mctrl      = cdns_uart_get_mctrl,
@@ -987,6 +1004,7 @@ static struct uart_ops cdns_uart_ops = {
        .set_termios    = cdns_uart_set_termios,
        .startup        = cdns_uart_startup,
        .shutdown       = cdns_uart_shutdown,
+       .pm             = cdns_uart_pm,
        .type           = cdns_uart_type,
        .verify_port    = cdns_uart_verify_port,
        .request_port   = cdns_uart_request_port,
@@ -1350,12 +1368,12 @@ static int cdns_uart_probe(struct platform_device *pdev)
                return PTR_ERR(cdns_uart_data->uartclk);
        }
 
-       rc = clk_prepare_enable(cdns_uart_data->pclk);
+       rc = clk_prepare(cdns_uart_data->pclk);
        if (rc) {
                dev_err(&pdev->dev, "Unable to enable pclk clock.\n");
                return rc;
        }
-       rc = clk_prepare_enable(cdns_uart_data->uartclk);
+       rc = clk_prepare(cdns_uart_data->uartclk);
        if (rc) {
                dev_err(&pdev->dev, "Unable to enable device clock.\n");
                goto err_out_clk_dis_pclk;
@@ -1422,9 +1440,9 @@ err_out_notif_unreg:
                        &cdns_uart_data->clk_rate_change_nb);
 #endif
 err_out_clk_disable:
-       clk_disable_unprepare(cdns_uart_data->uartclk);
+       clk_unprepare(cdns_uart_data->uartclk);
 err_out_clk_dis_pclk:
-       clk_disable_unprepare(cdns_uart_data->pclk);
+       clk_unprepare(cdns_uart_data->pclk);
 
        return rc;
 }
@@ -1448,8 +1466,8 @@ static int cdns_uart_remove(struct platform_device *pdev)
 #endif
        rc = uart_remove_one_port(&cdns_uart_uart_driver, port);
        port->mapbase = 0;
-       clk_disable_unprepare(cdns_uart_data->uartclk);
-       clk_disable_unprepare(cdns_uart_data->pclk);
+       clk_unprepare(cdns_uart_data->uartclk);
+       clk_unprepare(cdns_uart_data->pclk);
        return rc;
 }
 
index c8c91f0476a22d1510191483009fea621187722d..9d7ab7b66a8a18999c9a0afb94b9a1bfed8bce4a 100644 (file)
@@ -499,9 +499,8 @@ con_insert_unipair(struct uni_pagedir *p, u_short unicode, u_short fontpos)
        return 0;
 }
 
-/* ui is a leftover from using a hashtable, but might be used again
-   Caller must hold the lock */
-static int con_do_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
+/* Caller must hold the lock */
+static int con_do_clear_unimap(struct vc_data *vc)
 {
        struct uni_pagedir *p, *q;
 
@@ -524,11 +523,11 @@ static int con_do_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
        return 0;
 }
 
-int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
+int con_clear_unimap(struct vc_data *vc)
 {
        int ret;
        console_lock();
-       ret = con_do_clear_unimap(vc, ui);
+       ret = con_do_clear_unimap(vc);
        console_unlock();
        return ret;
 }
@@ -556,7 +555,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
                int j, k;
                u16 **p1, *p2, l;
                
-               err1 = con_do_clear_unimap(vc, NULL);
+               err1 = con_do_clear_unimap(vc);
                if (err1) {
                        console_unlock();
                        return err1;
@@ -677,7 +676,7 @@ int con_set_default_unimap(struct vc_data *vc)
        
        /* The default font is always 256 characters */
 
-       err = con_do_clear_unimap(vc, NULL);
+       err = con_do_clear_unimap(vc);
        if (err)
                return err;
     
index f973bfce5d089256086b945fb37148fffebf902d..0f8caae4267df8b3a19e39011730478f2c63c9d3 100644 (file)
@@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c)
 
 static void do_compute_shiftstate(void)
 {
-       unsigned int i, j, k, sym, val;
+       unsigned int k, sym, val;
 
        shift_state = 0;
        memset(shift_down, 0, sizeof(shift_down));
 
-       for (i = 0; i < ARRAY_SIZE(key_down); i++) {
-
-               if (!key_down[i])
+       for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
+               sym = U(key_maps[0][k]);
+               if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
                        continue;
 
-               k = i * BITS_PER_LONG;
-
-               for (j = 0; j < BITS_PER_LONG; j++, k++) {
-
-                       if (!test_bit(k, key_down))
-                               continue;
-
-                       sym = U(key_maps[0][k]);
-                       if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
-                               continue;
-
-                       val = KVAL(sym);
-                       if (val == KVAL(K_CAPSSHIFT))
-                               val = KVAL(K_SHIFT);
+               val = KVAL(sym);
+               if (val == KVAL(K_CAPSSHIFT))
+                       val = KVAL(K_SHIFT);
 
-                       shift_down[val]++;
-                       shift_state |= (1 << val);
-               }
+               shift_down[val]++;
+               shift_state |= BIT(val);
        }
 }
 
@@ -579,7 +567,7 @@ static void fn_scroll_forw(struct vc_data *vc)
 
 static void fn_scroll_back(struct vc_data *vc)
 {
-       scrollback(vc, 0);
+       scrollback(vc);
 }
 
 static void fn_show_mem(struct vc_data *vc)
@@ -1745,16 +1733,10 @@ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
                        return -EINVAL;
 
                if (ct) {
-                       buf = kmalloc(ct * sizeof(struct kbdiacruc),
-                                                               GFP_KERNEL);
-                       if (buf == NULL)
-                               return -ENOMEM;
-
-                       if (copy_from_user(buf, a->kbdiacruc,
-                                       ct * sizeof(struct kbdiacruc))) {
-                               kfree(buf);
-                               return -EFAULT;
-                       }
+                       buf = memdup_user(a->kbdiacruc,
+                                         ct * sizeof(struct kbdiacruc));
+                       if (IS_ERR(buf))
+                               return PTR_ERR(buf);
                } 
                spin_lock_irqsave(&kbd_event_lock, flags);
                if (ct)
index 5b0fe97c46ca9fb0bff6b66e0638226bf1b5781d..2705ca960e92ea89075b3849839c31202077a276 100644 (file)
@@ -277,13 +277,15 @@ static void notify_update(struct vc_data *vc)
  *     Low-Level Functions
  */
 
-#define IS_FG(vc)      ((vc)->vc_num == fg_console)
+static inline bool con_is_fg(const struct vc_data *vc)
+{
+       return vc->vc_num == fg_console;
+}
 
-#ifdef VT_BUF_VRAM_ONLY
-#define DO_UPDATE(vc)  0
-#else
-#define DO_UPDATE(vc)  (CON_IS_VISIBLE(vc) && !console_blanked)
-#endif
+static inline bool con_should_update(const struct vc_data *vc)
+{
+       return con_is_visible(vc) && !console_blanked;
+}
 
 static inline unsigned short *screenpos(struct vc_data *vc, int offset, int viewed)
 {
@@ -321,7 +323,7 @@ static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
                nr = b - t - 1;
        if (b > vc->vc_rows || t >= b || nr < 1)
                return;
-       if (CON_IS_VISIBLE(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
+       if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_UP, nr))
                return;
        d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
        s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
@@ -339,7 +341,7 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
                nr = b - t - 1;
        if (b > vc->vc_rows || t >= b || nr < 1)
                return;
-       if (CON_IS_VISIBLE(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
+       if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, SM_DOWN, nr))
                return;
        s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
        step = vc->vc_cols * nr;
@@ -349,7 +351,6 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
 
 static void do_update_region(struct vc_data *vc, unsigned long start, int count)
 {
-#ifndef VT_BUF_VRAM_ONLY
        unsigned int xx, yy, offset;
        u16 *p;
 
@@ -390,14 +391,13 @@ static void do_update_region(struct vc_data *vc, unsigned long start, int count)
                        start = vc->vc_sw->con_getxy(vc, start, NULL, NULL);
                }
        }
-#endif
 }
 
 void update_region(struct vc_data *vc, unsigned long start, int count)
 {
        WARN_CONSOLE_UNLOCKED();
 
-       if (DO_UPDATE(vc)) {
+       if (con_should_update(vc)) {
                hide_cursor(vc);
                do_update_region(vc, start, count);
                set_cursor(vc);
@@ -413,7 +413,6 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
                return vc->vc_sw->con_build_attr(vc, _color, _intensity,
                       _blink, _underline, _reverse, _italic);
 
-#ifndef VT_BUF_VRAM_ONLY
 /*
  * ++roman: I completely changed the attribute format for monochrome
  * mode (!can_do_color). The formerly used MDA (monochrome display
@@ -448,9 +447,6 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
                a <<= 1;
        return a;
        }
-#else
-       return 0;
-#endif
 }
 
 static void update_attr(struct vc_data *vc)
@@ -470,10 +466,9 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
 
        count /= 2;
        p = screenpos(vc, offset, viewed);
-       if (vc->vc_sw->con_invert_region)
+       if (vc->vc_sw->con_invert_region) {
                vc->vc_sw->con_invert_region(vc, p, count);
-#ifndef VT_BUF_VRAM_ONLY
-       else {
+       } else {
                u16 *q = p;
                int cnt = count;
                u16 a;
@@ -501,8 +496,8 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
                        }
                }
        }
-#endif
-       if (DO_UPDATE(vc))
+
+       if (con_should_update(vc))
                do_update_region(vc, (unsigned long) p, count);
        notify_update(vc);
 }
@@ -519,7 +514,7 @@ void complement_pos(struct vc_data *vc, int offset)
        if (old_offset != -1 && old_offset >= 0 &&
            old_offset < vc->vc_screenbuf_size) {
                scr_writew(old, screenpos(vc, old_offset, 1));
-               if (DO_UPDATE(vc))
+               if (con_should_update(vc))
                        vc->vc_sw->con_putc(vc, old, oldy, oldx);
                notify_update(vc);
        }
@@ -534,7 +529,7 @@ void complement_pos(struct vc_data *vc, int offset)
                old = scr_readw(p);
                new = old ^ vc->vc_complement_mask;
                scr_writew(new, p);
-               if (DO_UPDATE(vc)) {
+               if (con_should_update(vc)) {
                        oldx = (offset >> 1) % vc->vc_cols;
                        oldy = (offset >> 1) / vc->vc_cols;
                        vc->vc_sw->con_putc(vc, new, oldy, oldx);
@@ -550,7 +545,7 @@ static void insert_char(struct vc_data *vc, unsigned int nr)
        scr_memmovew(p + nr, p, (vc->vc_cols - vc->vc_x - nr) * 2);
        scr_memsetw(p, vc->vc_video_erase_char, nr * 2);
        vc->vc_need_wrap = 0;
-       if (DO_UPDATE(vc))
+       if (con_should_update(vc))
                do_update_region(vc, (unsigned long) p,
                        vc->vc_cols - vc->vc_x);
 }
@@ -563,7 +558,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr)
        scr_memsetw(p + vc->vc_cols - vc->vc_x - nr, vc->vc_video_erase_char,
                        nr * 2);
        vc->vc_need_wrap = 0;
-       if (DO_UPDATE(vc))
+       if (con_should_update(vc))
                do_update_region(vc, (unsigned long) p,
                        vc->vc_cols - vc->vc_x);
 }
@@ -583,7 +578,7 @@ static void add_softcursor(struct vc_data *vc)
        if ((type & 0x20) && ((softcursor_original & 0x7000) == (i & 0x7000))) i ^= 0x7000;
        if ((type & 0x40) && ((i & 0x700) == ((i & 0x7000) >> 4))) i ^= 0x0700;
        scr_writew(i, (u16 *) vc->vc_pos);
-       if (DO_UPDATE(vc))
+       if (con_should_update(vc))
                vc->vc_sw->con_putc(vc, i, vc->vc_y, vc->vc_x);
 }
 
@@ -591,7 +586,7 @@ static void hide_softcursor(struct vc_data *vc)
 {
        if (softcursor_original != -1) {
                scr_writew(softcursor_original, (u16 *)vc->vc_pos);
-               if (DO_UPDATE(vc))
+               if (con_should_update(vc))
                        vc->vc_sw->con_putc(vc, softcursor_original,
                                        vc->vc_y, vc->vc_x);
                softcursor_original = -1;
@@ -608,8 +603,7 @@ static void hide_cursor(struct vc_data *vc)
 
 static void set_cursor(struct vc_data *vc)
 {
-       if (!IS_FG(vc) || console_blanked ||
-           vc->vc_mode == KD_GRAPHICS)
+       if (!con_is_fg(vc) || console_blanked || vc->vc_mode == KD_GRAPHICS)
                return;
        if (vc->vc_deccm) {
                if (vc == sel_cons)
@@ -625,7 +619,7 @@ static void set_origin(struct vc_data *vc)
 {
        WARN_CONSOLE_UNLOCKED();
 
-       if (!CON_IS_VISIBLE(vc) ||
+       if (!con_is_visible(vc) ||
            !vc->vc_sw->con_set_origin ||
            !vc->vc_sw->con_set_origin(vc))
                vc->vc_origin = (unsigned long)vc->vc_screenbuf;
@@ -673,12 +667,12 @@ void redraw_screen(struct vc_data *vc, int is_switch)
                struct vc_data *old_vc = vc_cons[fg_console].d;
                if (old_vc == vc)
                        return;
-               if (!CON_IS_VISIBLE(vc))
+               if (!con_is_visible(vc))
                        redraw = 1;
                *vc->vc_display_fg = vc;
                fg_console = vc->vc_num;
                hide_cursor(old_vc);
-               if (!CON_IS_VISIBLE(old_vc)) {
+               if (!con_is_visible(old_vc)) {
                        save_screen(old_vc);
                        set_origin(old_vc);
                }
@@ -954,7 +948,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
                tty_do_resize(tty, &ws);
        }
 
-       if (CON_IS_VISIBLE(vc))
+       if (con_is_visible(vc))
                update_screen(vc);
        vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
        return err;
@@ -1103,11 +1097,9 @@ static void gotoxay(struct vc_data *vc, int new_x, int new_y)
        gotoxy(vc, new_x, vc->vc_decom ? (vc->vc_top + new_y) : new_y);
 }
 
-void scrollback(struct vc_data *vc, int lines)
+void scrollback(struct vc_data *vc)
 {
-       if (!lines)
-               lines = vc->vc_rows / 2;
-       scrolldelta(-lines);
+       scrolldelta(-(vc->vc_rows / 2));
 }
 
 void scrollfront(struct vc_data *vc, int lines)
@@ -1186,7 +1178,7 @@ static void csi_J(struct vc_data *vc, int vpar)
                        scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
                                    vc->vc_screenbuf_size >> 1);
                        set_origin(vc);
-                       if (CON_IS_VISIBLE(vc))
+                       if (con_is_visible(vc))
                                update_screen(vc);
                        /* fall through */
                case 2: /* erase whole display */
@@ -1197,7 +1189,7 @@ static void csi_J(struct vc_data *vc, int vpar)
                        return;
        }
        scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
-       if (DO_UPDATE(vc))
+       if (con_should_update(vc))
                do_update_region(vc, (unsigned long) start, count);
        vc->vc_need_wrap = 0;
 }
@@ -1225,7 +1217,7 @@ static void csi_K(struct vc_data *vc, int vpar)
        }
        scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
        vc->vc_need_wrap = 0;
-       if (DO_UPDATE(vc))
+       if (con_should_update(vc))
                do_update_region(vc, (unsigned long) start, count);
 }
 
@@ -1238,7 +1230,7 @@ static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar posi
        count = (vpar > vc->vc_cols - vc->vc_x) ? (vc->vc_cols - vc->vc_x) : vpar;
 
        scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count);
-       if (DO_UPDATE(vc))
+       if (con_should_update(vc))
                vc->vc_sw->con_clear(vc, vc->vc_y, vc->vc_x, 1, count);
        vc->vc_need_wrap = 0;
 }
@@ -1255,48 +1247,87 @@ static void default_attr(struct vc_data *vc)
 
 struct rgb { u8 r; u8 g; u8 b; };
 
-static struct rgb rgb_from_256(int i)
+static void rgb_from_256(int i, struct rgb *c)
 {
-       struct rgb c;
        if (i < 8) {            /* Standard colours. */
-               c.r = i&1 ? 0xaa : 0x00;
-               c.g = i&2 ? 0xaa : 0x00;
-               c.b = i&4 ? 0xaa : 0x00;
+               c->r = i&1 ? 0xaa : 0x00;
+               c->g = i&2 ? 0xaa : 0x00;
+               c->b = i&4 ? 0xaa : 0x00;
        } else if (i < 16) {
-               c.r = i&1 ? 0xff : 0x55;
-               c.g = i&2 ? 0xff : 0x55;
-               c.b = i&4 ? 0xff : 0x55;
+               c->r = i&1 ? 0xff : 0x55;
+               c->g = i&2 ? 0xff : 0x55;
+               c->b = i&4 ? 0xff : 0x55;
        } else if (i < 232) {   /* 6x6x6 colour cube. */
-               c.r = (i - 16) / 36 * 85 / 2;
-               c.g = (i - 16) / 6 % 6 * 85 / 2;
-               c.b = (i - 16) % 6 * 85 / 2;
+               c->r = (i - 16) / 36 * 85 / 2;
+               c->g = (i - 16) / 6 % 6 * 85 / 2;
+               c->b = (i - 16) % 6 * 85 / 2;
        } else                  /* Grayscale ramp. */
-               c.r = c.g = c.b = i * 10 - 2312;
-       return c;
+               c->r = c->g = c->b = i * 10 - 2312;
 }
 
-static void rgb_foreground(struct vc_data *vc, struct rgb c)
+static void rgb_foreground(struct vc_data *vc, const struct rgb *c)
 {
-       u8 hue, max = c.r;
-       if (c.g > max)
-               max = c.g;
-       if (c.b > max)
-               max = c.b;
-       hue = (c.r > max/2 ? 4 : 0)
-           | (c.g > max/2 ? 2 : 0)
-           | (c.b > max/2 ? 1 : 0);
-       if (hue == 7 && max <= 0x55)
-               hue = 0, vc->vc_intensity = 2;
+       u8 hue = 0, max = max3(c->r, c->g, c->b);
+
+       if (c->r > max / 2)
+               hue |= 4;
+       if (c->g > max / 2)
+               hue |= 2;
+       if (c->b > max / 2)
+               hue |= 1;
+
+       if (hue == 7 && max <= 0x55) {
+               hue = 0;
+               vc->vc_intensity = 2;
+       } else if (max > 0xaa)
+               vc->vc_intensity = 2;
        else
-               vc->vc_intensity = (max > 0xaa) + 1;
+               vc->vc_intensity = 1;
+
        vc->vc_color = (vc->vc_color & 0xf0) | hue;
 }
 
-static void rgb_background(struct vc_data *vc, struct rgb c)
+static void rgb_background(struct vc_data *vc, const struct rgb *c)
 {
        /* For backgrounds, err on the dark side. */
        vc->vc_color = (vc->vc_color & 0x0f)
-               | (c.r&0x80) >> 1 | (c.g&0x80) >> 2 | (c.b&0x80) >> 3;
+               | (c->r&0x80) >> 1 | (c->g&0x80) >> 2 | (c->b&0x80) >> 3;
+}
+
+/*
+ * ITU T.416 Higher colour modes. They break the usual properties of SGR codes
+ * and thus need to be detected and ignored by hand. Strictly speaking, that
+ * standard also wants : rather than ; as separators, contrary to ECMA-48, but
+ * no one produces such codes and almost no one accepts them.
+ *
+ * Subcommands 3 (CMY) and 4 (CMYK) are so insane there's no point in
+ * supporting them.
+ */
+static int vc_t416_color(struct vc_data *vc, int i,
+               void(*set_color)(struct vc_data *vc, const struct rgb *c))
+{
+       struct rgb c;
+
+       i++;
+       if (i > vc->vc_npar)
+               return i;
+
+       if (vc->vc_par[i] == 5 && i < vc->vc_npar) {
+               /* 256 colours -- ubiquitous */
+               i++;
+               rgb_from_256(vc->vc_par[i], &c);
+       } else if (vc->vc_par[i] == 2 && i <= vc->vc_npar + 3) {
+               /* 24 bit -- extremely rare */
+               c.r = vc->vc_par[i + 1];
+               c.g = vc->vc_par[i + 2];
+               c.b = vc->vc_par[i + 3];
+               i += 3;
+       } else
+               return i;
+
+       set_color(vc, &c);
+
+       return i;
 }
 
 /* console_lock is held */
@@ -1306,135 +1337,91 @@ static void csi_m(struct vc_data *vc)
 
        for (i = 0; i <= vc->vc_npar; i++)
                switch (vc->vc_par[i]) {
-                       case 0: /* all attributes off */
-                               default_attr(vc);
-                               break;
-                       case 1:
-                               vc->vc_intensity = 2;
-                               break;
-                       case 2:
-                               vc->vc_intensity = 0;
-                               break;
-                       case 3:
-                               vc->vc_italic = 1;
-                               break;
-                       case 4:
-                               vc->vc_underline = 1;
-                               break;
-                       case 5:
-                               vc->vc_blink = 1;
-                               break;
-                       case 7:
-                               vc->vc_reverse = 1;
-                               break;
-                       case 10: /* ANSI X3.64-1979 (SCO-ish?)
-                                 * Select primary font, don't display
-                                 * control chars if defined, don't set
-                                 * bit 8 on output.
-                                 */
-                               vc->vc_translate = set_translate(vc->vc_charset == 0
-                                               ? vc->vc_G0_charset
-                                               : vc->vc_G1_charset, vc);
-                               vc->vc_disp_ctrl = 0;
-                               vc->vc_toggle_meta = 0;
-                               break;
-                       case 11: /* ANSI X3.64-1979 (SCO-ish?)
-                                 * Select first alternate font, lets
-                                 * chars < 32 be displayed as ROM chars.
-                                 */
-                               vc->vc_translate = set_translate(IBMPC_MAP, vc);
-                               vc->vc_disp_ctrl = 1;
-                               vc->vc_toggle_meta = 0;
-                               break;
-                       case 12: /* ANSI X3.64-1979 (SCO-ish?)
-                                 * Select second alternate font, toggle
-                                 * high bit before displaying as ROM char.
-                                 */
-                               vc->vc_translate = set_translate(IBMPC_MAP, vc);
-                               vc->vc_disp_ctrl = 1;
-                               vc->vc_toggle_meta = 1;
-                               break;
-                       case 21:
-                       case 22:
-                               vc->vc_intensity = 1;
-                               break;
-                       case 23:
-                               vc->vc_italic = 0;
-                               break;
-                       case 24:
-                               vc->vc_underline = 0;
-                               break;
-                       case 25:
-                               vc->vc_blink = 0;
-                               break;
-                       case 27:
-                               vc->vc_reverse = 0;
-                               break;
-                       case 38: /* ITU T.416
-                                 * Higher colour modes.
-                                 * They break the usual properties of SGR codes
-                                 * and thus need to be detected and ignored by
-                                 * hand.  Strictly speaking, that standard also
-                                 * wants : rather than ; as separators, contrary
-                                 * to ECMA-48, but no one produces such codes
-                                 * and almost no one accepts them.
-                                 */
-                               i++;
-                               if (i > vc->vc_npar)
-                                       break;
-                               if (vc->vc_par[i] == 5 &&  /* 256 colours */
-                                   i < vc->vc_npar) {     /* ubiquitous */
-                                       i++;
-                                       rgb_foreground(vc,
-                                               rgb_from_256(vc->vc_par[i]));
-                               } else if (vc->vc_par[i] == 2 &&  /* 24 bit */
-                                          i <= vc->vc_npar + 3) {/* extremely rare */
-                                       struct rgb c = {
-                                               .r = vc->vc_par[i + 1],
-                                               .g = vc->vc_par[i + 2],
-                                               .b = vc->vc_par[i + 3],
-                                       };
-                                       rgb_foreground(vc, c);
-                                       i += 3;
-                               }
-                               /* Subcommands 3 (CMY) and 4 (CMYK) are so insane
-                                * there's no point in supporting them.
-                                */
-                               break;
-                       case 48:
-                               i++;
-                               if (i > vc->vc_npar)
-                                       break;
-                               if (vc->vc_par[i] == 5 &&  /* 256 colours */
-                                   i < vc->vc_npar) {
-                                       i++;
-                                       rgb_background(vc,
-                                               rgb_from_256(vc->vc_par[i]));
-                               } else if (vc->vc_par[i] == 2 && /* 24 bit */
-                                          i <= vc->vc_npar + 3) {
-                                       struct rgb c = {
-                                               .r = vc->vc_par[i + 1],
-                                               .g = vc->vc_par[i + 2],
-                                               .b = vc->vc_par[i + 3],
-                                       };
-                                       rgb_background(vc, c);
-                                       i += 3;
-                               }
-                               break;
-                       case 39:
-                               vc->vc_color = (vc->vc_def_color & 0x0f) | (vc->vc_color & 0xf0);
-                               break;
-                       case 49:
-                               vc->vc_color = (vc->vc_def_color & 0xf0) | (vc->vc_color & 0x0f);
-                               break;
-                       default:
-                               if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
-                                       vc->vc_color = color_table[vc->vc_par[i] - 30]
-                                               | (vc->vc_color & 0xf0);
-                               else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
-                                       vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4)
-                                               | (vc->vc_color & 0x0f);
-                               break;
+               case 0: /* all attributes off */
+                       default_attr(vc);
+                       break;
+               case 1:
+                       vc->vc_intensity = 2;
+                       break;
+               case 2:
+                       vc->vc_intensity = 0;
+                       break;
+               case 3:
+                       vc->vc_italic = 1;
+                       break;
+               case 4:
+                       vc->vc_underline = 1;
+                       break;
+               case 5:
+                       vc->vc_blink = 1;
+                       break;
+               case 7:
+                       vc->vc_reverse = 1;
+                       break;
+               case 10: /* ANSI X3.64-1979 (SCO-ish?)
+                         * Select primary font, don't display control chars if
+                         * defined, don't set bit 8 on output.
+                         */
+                       vc->vc_translate = set_translate(vc->vc_charset == 0
+                                       ? vc->vc_G0_charset
+                                       : vc->vc_G1_charset, vc);
+                       vc->vc_disp_ctrl = 0;
+                       vc->vc_toggle_meta = 0;
+                       break;
+               case 11: /* ANSI X3.64-1979 (SCO-ish?)
+                         * Select first alternate font, lets chars < 32 be
+                         * displayed as ROM chars.
+                         */
+                       vc->vc_translate = set_translate(IBMPC_MAP, vc);
+                       vc->vc_disp_ctrl = 1;
+                       vc->vc_toggle_meta = 0;
+                       break;
+               case 12: /* ANSI X3.64-1979 (SCO-ish?)
+                         * Select second alternate font, toggle high bit
+                         * before displaying as ROM char.
+                         */
+                       vc->vc_translate = set_translate(IBMPC_MAP, vc);
+                       vc->vc_disp_ctrl = 1;
+                       vc->vc_toggle_meta = 1;
+                       break;
+               case 21:
+               case 22:
+                       vc->vc_intensity = 1;
+                       break;
+               case 23:
+                       vc->vc_italic = 0;
+                       break;
+               case 24:
+                       vc->vc_underline = 0;
+                       break;
+               case 25:
+                       vc->vc_blink = 0;
+                       break;
+               case 27:
+                       vc->vc_reverse = 0;
+                       break;
+               case 38:
+                       i = vc_t416_color(vc, i, rgb_foreground);
+                       break;
+               case 48:
+                       i = vc_t416_color(vc, i, rgb_background);
+                       break;
+               case 39:
+                       vc->vc_color = (vc->vc_def_color & 0x0f) |
+                               (vc->vc_color & 0xf0);
+                       break;
+               case 49:
+                       vc->vc_color = (vc->vc_def_color & 0xf0) |
+                               (vc->vc_color & 0x0f);
+                       break;
+               default:
+                       if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
+                               vc->vc_color = color_table[vc->vc_par[i] - 30]
+                                       | (vc->vc_color & 0xf0);
+                       else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
+                               vc->vc_color = (color_table[vc->vc_par[i] - 40] << 4)
+                                       | (vc->vc_color & 0x0f);
+                       break;
                }
        update_attr(vc);
 }
@@ -1496,7 +1483,6 @@ static void set_mode(struct vc_data *vc, int on_off)
                                        clr_kbd(vc, decckm);
                                break;
                        case 3: /* 80/132 mode switch unimplemented */
-                               vc->vc_deccolm = on_off;
 #if 0
                                vc_resize(deccolm ? 132 : 80, vc->vc_rows);
                                /* this alone does not suffice; some user mode
@@ -2178,18 +2164,20 @@ static int is_double_width(uint32_t ucs)
        return bisearch(ucs, double_width, ARRAY_SIZE(double_width) - 1);
 }
 
+static void con_flush(struct vc_data *vc, unsigned long draw_from,
+               unsigned long draw_to, int *draw_x)
+{
+       if (*draw_x < 0)
+               return;
+
+       vc->vc_sw->con_putcs(vc, (u16 *)draw_from,
+                       (u16 *)draw_to - (u16 *)draw_from, vc->vc_y, *draw_x);
+       *draw_x = -1;
+}
+
 /* acquires console_lock */
 static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int count)
 {
-#ifdef VT_BUF_VRAM_ONLY
-#define FLUSH do { } while(0);
-#else
-#define FLUSH if (draw_x >= 0) { \
-       vc->vc_sw->con_putcs(vc, (u16 *)draw_from, (u16 *)draw_to - (u16 *)draw_from, vc->vc_y, draw_x); \
-       draw_x = -1; \
-       }
-#endif
-
        int c, tc, ok, n = 0, draw_x = -1;
        unsigned int currcons;
        unsigned long draw_from = 0, draw_to = 0;
@@ -2226,7 +2214,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
        charmask = himask ? 0x1ff : 0xff;
 
        /* undraw cursor first */
-       if (IS_FG(vc))
+       if (con_is_fg(vc))
                hide_cursor(vc);
 
        param.vc = vc;
@@ -2381,12 +2369,13 @@ rescan_last_byte:
                                } else {
                                        vc_attr = ((vc->vc_attr) & 0x88) | (((vc->vc_attr) & 0x70) >> 4) | (((vc->vc_attr) & 0x07) << 4);
                                }
-                               FLUSH
+                               con_flush(vc, draw_from, draw_to, &draw_x);
                        }
 
                        while (1) {
                                if (vc->vc_need_wrap || vc->vc_decim)
-                                       FLUSH
+                                       con_flush(vc, draw_from, draw_to,
+                                                       &draw_x);
                                if (vc->vc_need_wrap) {
                                        cr(vc);
                                        lf(vc);
@@ -2397,7 +2386,7 @@ rescan_last_byte:
                                             ((vc_attr << 8) & ~himask) + ((tc & 0x100) ? himask : 0) + (tc & 0xff) :
                                             (vc_attr << 8) + tc,
                                           (u16 *) vc->vc_pos);
-                               if (DO_UPDATE(vc) && draw_x < 0) {
+                               if (con_should_update(vc) && draw_x < 0) {
                                        draw_x = vc->vc_x;
                                        draw_from = vc->vc_pos;
                                }
@@ -2416,9 +2405,8 @@ rescan_last_byte:
                        }
                        notify_write(vc, c);
 
-                       if (inverse) {
-                               FLUSH
-                       }
+                       if (inverse)
+                               con_flush(vc, draw_from, draw_to, &draw_x);
 
                        if (rescan) {
                                rescan = 0;
@@ -2429,15 +2417,14 @@ rescan_last_byte:
                        }
                        continue;
                }
-               FLUSH
+               con_flush(vc, draw_from, draw_to, &draw_x);
                do_con_trol(tty, vc, orig);
        }
-       FLUSH
+       con_flush(vc, draw_from, draw_to, &draw_x);
        console_conditional_schedule();
        console_unlock();
        notify_update(vc);
        return n;
-#undef FLUSH
 }
 
 /*
@@ -2471,7 +2458,7 @@ static void console_callback(struct work_struct *ignored)
        if (scrollback_delta) {
                struct vc_data *vc = vc_cons[fg_console].d;
                clear_selection();
-               if (vc->vc_mode == KD_TEXT)
+               if (vc->vc_mode == KD_TEXT && vc->vc_sw->con_scrolldelta)
                        vc->vc_sw->con_scrolldelta(vc, scrollback_delta);
                scrollback_delta = 0;
        }
@@ -2583,7 +2570,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
                goto quit;
 
        /* undraw cursor first */
-       if (IS_FG(vc))
+       if (con_is_fg(vc))
                hide_cursor(vc);
 
        start = (ushort *)vc->vc_pos;
@@ -2594,7 +2581,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
                c = *b++;
                if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
                        if (cnt > 0) {
-                               if (CON_IS_VISIBLE(vc))
+                               if (con_is_visible(vc))
                                        vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
                                vc->vc_x += cnt;
                                if (vc->vc_need_wrap)
@@ -2626,7 +2613,7 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
                myx++;
        }
        if (cnt > 0) {
-               if (CON_IS_VISIBLE(vc))
+               if (con_is_visible(vc))
                        vc->vc_sw->con_putcs(vc, start, cnt, vc->vc_y, vc->vc_x);
                vc->vc_x += cnt;
                if (vc->vc_x == vc->vc_cols) {
@@ -3173,7 +3160,7 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
 
                j = i;
 
-               if (CON_IS_VISIBLE(vc)) {
+               if (con_is_visible(vc)) {
                        k = i;
                        save_screen(vc);
                }
@@ -3981,7 +3968,7 @@ static void set_palette(struct vc_data *vc)
 {
        WARN_CONSOLE_UNLOCKED();
 
-       if (vc->vc_mode != KD_GRAPHICS)
+       if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_set_palette)
                vc->vc_sw->con_set_palette(vc, color_table);
 }
 
index 97d5a74558a3a4477e3c5cfb29dd88591fefbe92..f62c598810ff4ca64ec2df66b769ea0b4e6541e0 100644 (file)
@@ -1006,16 +1006,10 @@ int vt_ioctl(struct tty_struct *tty,
                break;
 
        case PIO_UNIMAPCLR:
-             { struct unimapinit ui;
                if (!perm)
                        return -EPERM;
-               ret = copy_from_user(&ui, up, sizeof(struct unimapinit));
-               if (ret)
-                       ret = -EFAULT;
-               else
-                       con_clear_unimap(vc, &ui);
+               con_clear_unimap(vc);
                break;
-             }
 
        case PIO_UNIMAP:
        case GIO_UNIMAP:
index 3644a3500b700013250f1f841bfe49117f70f563..5e5b9eb7ebf6daa2f8a1764c60fe3dced6bee13f 100644 (file)
@@ -4,8 +4,9 @@ config USB_CHIPIDEA
        select EXTCON
        help
          Say Y here if your system has a dual role high speed USB
-         controller based on ChipIdea silicon IP. Currently, only the
-         peripheral mode is supported.
+         controller based on ChipIdea silicon IP. It supports:
+         Dual-role switch (ID, OTG FSM, sysfs), Host-only, and
+         Peripheral-only.
 
          When compiled dynamically, the module will be called ci-hdrc.ko.
 
index 94a14f5dc4d412ed3c21761613d2ddd5fcacbfde..71912301ef7f804fb9698d7915f7136041617906 100644 (file)
@@ -946,7 +946,7 @@ static int wait_serial_change(struct acm *acm, unsigned long arg)
        DECLARE_WAITQUEUE(wait, current);
        struct async_icount old, new;
 
-       if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD ))
+       if (arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD))
                return -EINVAL;
        do {
                spin_lock_irq(&acm->read_lock);
@@ -1146,7 +1146,7 @@ static int acm_probe(struct usb_interface *intf,
                     const struct usb_device_id *id)
 {
        struct usb_cdc_union_desc *union_header = NULL;
-       struct usb_cdc_country_functional_desc *cfd = NULL;
+       struct usb_cdc_call_mgmt_descriptor *cmgmd = NULL;
        unsigned char *buffer = intf->altsetting->extra;
        int buflen = intf->altsetting->extralen;
        struct usb_interface *control_interface;
@@ -1155,18 +1155,16 @@ static int acm_probe(struct usb_interface *intf,
        struct usb_endpoint_descriptor *epread = NULL;
        struct usb_endpoint_descriptor *epwrite = NULL;
        struct usb_device *usb_dev = interface_to_usbdev(intf);
+       struct usb_cdc_parsed_header h;
        struct acm *acm;
        int minor;
        int ctrlsize, readsize;
        u8 *buf;
-       u8 ac_management_function = 0;
-       u8 call_management_function = 0;
-       int call_interface_num = -1;
-       int data_interface_num = -1;
+       int call_intf_num = -1;
+       int data_intf_num = -1;
        unsigned long quirks;
        int num_rx_buf;
        int i;
-       unsigned int elength = 0;
        int combined_interfaces = 0;
        struct device *tty_dev;
        int rv = -ENOMEM;
@@ -1210,70 +1208,22 @@ static int acm_probe(struct usb_interface *intf,
                }
        }
 
-       while (buflen > 0) {
-               elength = buffer[0];
-               if (!elength) {
-                       dev_err(&intf->dev, "skipping garbage byte\n");
-                       elength = 1;
-                       goto next_desc;
-               }
-               if (buffer[1] != USB_DT_CS_INTERFACE) {
-                       dev_err(&intf->dev, "skipping garbage\n");
-                       goto next_desc;
-               }
-
-               switch (buffer[2]) {
-               case USB_CDC_UNION_TYPE: /* we've found it */
-                       if (elength < sizeof(struct usb_cdc_union_desc))
-                               goto next_desc;
-                       if (union_header) {
-                               dev_err(&intf->dev, "More than one "
-                                       "union descriptor, skipping ...\n");
-                               goto next_desc;
-                       }
-                       union_header = (struct usb_cdc_union_desc *)buffer;
-                       break;
-               case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
-                       if (elength < sizeof(struct usb_cdc_country_functional_desc))
-                               goto next_desc;
-                       cfd = (struct usb_cdc_country_functional_desc *)buffer;
-                       break;
-               case USB_CDC_HEADER_TYPE: /* maybe check version */
-                       break; /* for now we ignore it */
-               case USB_CDC_ACM_TYPE:
-                       if (elength < 4)
-                               goto next_desc;
-                       ac_management_function = buffer[3];
-                       break;
-               case USB_CDC_CALL_MANAGEMENT_TYPE:
-                       if (elength < 5)
-                               goto next_desc;
-                       call_management_function = buffer[3];
-                       call_interface_num = buffer[4];
-                       break;
-               default:
-                       /*
-                        * there are LOTS more CDC descriptors that
-                        * could legitimately be found here.
-                        */
-                       dev_dbg(&intf->dev, "Ignoring descriptor: "
-                                       "type %02x, length %ud\n",
-                                       buffer[2], elength);
-                       break;
-               }
-next_desc:
-               buflen -= elength;
-               buffer += elength;
-       }
+       cdc_parse_cdc_header(&h, intf, buffer, buflen);
+       union_header = h.usb_cdc_union_desc;
+       cmgmd = h.usb_cdc_call_mgmt_descriptor;
+       if (cmgmd)
+               call_intf_num = cmgmd->bDataInterface;
 
        if (!union_header) {
-               if (call_interface_num > 0) {
+               if (call_intf_num > 0) {
                        dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n");
                        /* quirks for Droids MuIn LCD */
-                       if (quirks & NO_DATA_INTERFACE)
+                       if (quirks & NO_DATA_INTERFACE) {
                                data_interface = usb_ifnum_to_if(usb_dev, 0);
-                       else
-                               data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = call_interface_num));
+                       } else {
+                               data_intf_num = call_intf_num;
+                               data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
+                       }
                        control_interface = intf;
                } else {
                        if (intf->cur_altsetting->desc.bNumEndpoints != 3) {
@@ -1287,8 +1237,9 @@ next_desc:
                        }
                }
        } else {
+               data_intf_num = union_header->bSlaveInterface0;
                control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
-               data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
+               data_interface = usb_ifnum_to_if(usb_dev, data_intf_num);
        }
 
        if (!control_interface || !data_interface) {
@@ -1296,7 +1247,7 @@ next_desc:
                return -ENODEV;
        }
 
-       if (data_interface_num != call_interface_num)
+       if (data_intf_num != call_intf_num)
                dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n");
 
        if (control_interface == data_interface) {
@@ -1379,11 +1330,8 @@ made_compressed_probe:
                goto alloc_fail;
 
        minor = acm_alloc_minor(acm);
-       if (minor < 0) {
-               dev_err(&intf->dev, "no more free acm devices\n");
-               kfree(acm);
-               return -ENODEV;
-       }
+       if (minor < 0)
+               goto alloc_fail1;
 
        ctrlsize = usb_endpoint_maxp(epctrl);
        readsize = usb_endpoint_maxp(epread) *
@@ -1394,7 +1342,8 @@ made_compressed_probe:
        acm->data = data_interface;
        acm->minor = minor;
        acm->dev = usb_dev;
-       acm->ctrl_caps = ac_management_function;
+       if (h.usb_cdc_acm_descriptor)
+               acm->ctrl_caps = h.usb_cdc_acm_descriptor->bmCapabilities;
        if (quirks & NO_CAP_LINE)
                acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
        acm->ctrlsize = ctrlsize;
@@ -1488,7 +1437,10 @@ made_compressed_probe:
        if (i < 0)
                goto alloc_fail7;
 
-       if (cfd) { /* export the country data */
+       if (h.usb_cdc_country_functional_desc) { /* export the country data */
+               struct usb_cdc_country_functional_desc * cfd =
+                                       h.usb_cdc_country_functional_desc;
+
                acm->country_codes = kmalloc(cfd->bLength - 4, GFP_KERNEL);
                if (!acm->country_codes)
                        goto skip_countries;
@@ -1572,6 +1524,7 @@ alloc_fail4:
        usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
 alloc_fail2:
        acm_release_minor(acm);
+alloc_fail1:
        kfree(acm);
 alloc_fail:
        return rv;
index 61ea87917433096a73cb2bc3ef7f3f3a857e5caa..337948c42110a82c8564be326ad0e65ebb848080 100644 (file)
@@ -875,38 +875,18 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
        int rv = -EINVAL;
        struct usb_host_interface *iface;
        struct usb_endpoint_descriptor *ep;
-       struct usb_cdc_dmm_desc *dmhd;
+       struct usb_cdc_parsed_header hdr;
        u8 *buffer = intf->altsetting->extra;
        int buflen = intf->altsetting->extralen;
        u16 maxcom = WDM_DEFAULT_BUFSIZE;
 
        if (!buffer)
                goto err;
-       while (buflen > 2) {
-               if (buffer[1] != USB_DT_CS_INTERFACE) {
-                       dev_err(&intf->dev, "skipping garbage\n");
-                       goto next_desc;
-               }
 
-               switch (buffer[2]) {
-               case USB_CDC_HEADER_TYPE:
-                       break;
-               case USB_CDC_DMM_TYPE:
-                       dmhd = (struct usb_cdc_dmm_desc *)buffer;
-                       maxcom = le16_to_cpu(dmhd->wMaxCommand);
-                       dev_dbg(&intf->dev,
-                               "Finding maximum buffer length: %d", maxcom);
-                       break;
-               default:
-                       dev_err(&intf->dev,
-                               "Ignoring extra header, type %d, length %d\n",
-                               buffer[2], buffer[0]);
-                       break;
-               }
-next_desc:
-               buflen -= buffer[0];
-               buffer += buffer[0];
-       }
+       cdc_parse_cdc_header(&hdr, intf, buffer, buflen);
+
+       if (hdr.usb_cdc_dmm_desc)
+               maxcom = le16_to_cpu(hdr.usb_cdc_dmm_desc->wMaxCommand);
 
        iface = intf->cur_altsetting;
        if (iface->desc.bNumEndpoints != 1)
index e3d01619d6b381b4562545e73f99499554d1002e..5ef8da6e67c3fbc1ab82f8e7e24550ff4ef05ca1 100644 (file)
@@ -131,15 +131,17 @@ EXPORT_SYMBOL_GPL(usb_get_dr_mode);
  * of_usb_get_dr_mode_by_phy - Get dual role mode for the controller device
  * which is associated with the given phy device_node
  * @np:        Pointer to the given phy device_node
+ * @arg0: phandle args[0] for phy's with #phy-cells >= 1, or -1 for
+ *        phys which do not have phy-cells
  *
  * In dts a usb controller associates with phy devices.  The function gets
  * the string from property 'dr_mode' of the controller associated with the
  * given phy device node, and returns the correspondig enum usb_dr_mode.
  */
-enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np)
+enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
 {
        struct device_node *controller = NULL;
-       struct device_node *phy;
+       struct of_phandle_args args;
        const char *dr_mode;
        int index;
        int err;
@@ -148,12 +150,24 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np)
                controller = of_find_node_with_property(controller, "phys");
                index = 0;
                do {
-                       phy = of_parse_phandle(controller, "phys", index);
-                       of_node_put(phy);
-                       if (phy == phy_np)
+                       if (arg0 == -1) {
+                               args.np = of_parse_phandle(controller, "phys",
+                                                       index);
+                               args.args_count = 0;
+                       } else {
+                               err = of_parse_phandle_with_args(controller,
+                                                       "phys", "#phy-cells",
+                                                       index, &args);
+                               if (err)
+                                       break;
+                       }
+
+                       of_node_put(args.np);
+                       if (args.np == np && (args.args_count == 0 ||
+                                             args.args[0] == arg0))
                                goto finish;
                        index++;
-               } while (phy);
+               } while (args.np);
        } while (controller);
 
 finish:
index ea681f157368f245772ab145073594bf4f6fb273..0406a59f05510cb1dee98ffbf22b55c0ccd65750 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/nls.h>
 #include <linux/device.h>
 #include <linux/scatterlist.h>
+#include <linux/usb/cdc.h>
 #include <linux/usb/quirks.h>
 #include <linux/usb/hcd.h>     /* for usbcore internals */
 #include <asm/byteorder.h>
@@ -2023,3 +2024,155 @@ int usb_driver_set_configuration(struct usb_device *udev, int config)
        return 0;
 }
 EXPORT_SYMBOL_GPL(usb_driver_set_configuration);
+
+/**
+ * cdc_parse_cdc_header - parse the extra headers present in CDC devices
+ * @hdr: the place to put the results of the parsing
+ * @intf: the interface for which parsing is requested
+ * @buffer: pointer to the extra headers to be parsed
+ * @buflen: length of the extra headers
+ *
+ * This evaluates the extra headers present in CDC devices which
+ * bind the interfaces for data and control and provide details
+ * about the capabilities of the device.
+ *
+ * Return: number of descriptors parsed or -EINVAL
+ * if the header is contradictory beyond salvage
+ */
+int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
+				struct usb_interface *intf,
+				u8 *buffer,
+				int buflen)
+{
+	/* duplicates are ignored */
+	struct usb_cdc_union_desc *union_header = NULL;
+
+	/* duplicates are not tolerated */
+	struct usb_cdc_header_desc *header = NULL;
+	struct usb_cdc_ether_desc *ether = NULL;
+	struct usb_cdc_mdlm_detail_desc *detail = NULL;
+	struct usb_cdc_mdlm_desc *desc = NULL;
+
+	unsigned int elength;
+	int cnt = 0;
+
+	memset(hdr, 0x00, sizeof(struct usb_cdc_parsed_header));
+	hdr->phonet_magic_present = false;
+	while (buflen > 0) {
+		elength = buffer[0];
+		if (!elength) {
+			dev_err(&intf->dev, "skipping garbage byte\n");
+			elength = 1;
+			goto next_desc;
+		}
+		if (buffer[1] != USB_DT_CS_INTERFACE) {
+			dev_err(&intf->dev, "skipping garbage\n");
+			goto next_desc;
+		}
+
+		switch (buffer[2]) {
+		case USB_CDC_UNION_TYPE: /* we've found it */
+			if (elength < sizeof(struct usb_cdc_union_desc))
+				goto next_desc;
+			if (union_header) {
+				dev_err(&intf->dev, "More than one union descriptor, skipping ...\n");
+				goto next_desc;
+			}
+			union_header = (struct usb_cdc_union_desc *)buffer;
+			break;
+		case USB_CDC_COUNTRY_TYPE:
+			if (elength < sizeof(struct usb_cdc_country_functional_desc))
+				goto next_desc;
+			hdr->usb_cdc_country_functional_desc =
+				(struct usb_cdc_country_functional_desc *)buffer;
+			break;
+		case USB_CDC_HEADER_TYPE:
+			if (elength != sizeof(struct usb_cdc_header_desc))
+				goto next_desc;
+			if (header)
+				return -EINVAL;
+			header = (struct usb_cdc_header_desc *)buffer;
+			break;
+		case USB_CDC_ACM_TYPE:
+			if (elength < sizeof(struct usb_cdc_acm_descriptor))
+				goto next_desc;
+			hdr->usb_cdc_acm_descriptor =
+				(struct usb_cdc_acm_descriptor *)buffer;
+			break;
+		case USB_CDC_ETHERNET_TYPE:
+			if (elength != sizeof(struct usb_cdc_ether_desc))
+				goto next_desc;
+			if (ether)
+				return -EINVAL;
+			ether = (struct usb_cdc_ether_desc *)buffer;
+			break;
+		case USB_CDC_CALL_MANAGEMENT_TYPE:
+			if (elength < sizeof(struct usb_cdc_call_mgmt_descriptor))
+				goto next_desc;
+			hdr->usb_cdc_call_mgmt_descriptor =
+				(struct usb_cdc_call_mgmt_descriptor *)buffer;
+			break;
+		case USB_CDC_DMM_TYPE:
+			if (elength < sizeof(struct usb_cdc_dmm_desc))
+				goto next_desc;
+			hdr->usb_cdc_dmm_desc =
+				(struct usb_cdc_dmm_desc *)buffer;
+			break;
+		case USB_CDC_MDLM_TYPE:
+			/* check against the struct size, not a pointer size */
+			if (elength < sizeof(struct usb_cdc_mdlm_desc))
+				goto next_desc;
+			if (desc)
+				return -EINVAL;
+			desc = (struct usb_cdc_mdlm_desc *)buffer;
+			break;
+		case USB_CDC_MDLM_DETAIL_TYPE:
+			/* check against the struct size, not a pointer size */
+			if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
+				goto next_desc;
+			if (detail)
+				return -EINVAL;
+			detail = (struct usb_cdc_mdlm_detail_desc *)buffer;
+			break;
+		case USB_CDC_NCM_TYPE:
+			if (elength < sizeof(struct usb_cdc_ncm_desc))
+				goto next_desc;
+			hdr->usb_cdc_ncm_desc = (struct usb_cdc_ncm_desc *)buffer;
+			break;
+		case USB_CDC_MBIM_TYPE:
+			if (elength < sizeof(struct usb_cdc_mbim_desc))
+				goto next_desc;
+
+			hdr->usb_cdc_mbim_desc = (struct usb_cdc_mbim_desc *)buffer;
+			break;
+		case USB_CDC_MBIM_EXTENDED_TYPE:
+			/*
+			 * NOTE(review): a too-short descriptor here falls
+			 * through to cnt++ (unlike the goto next_desc used
+			 * by other cases) - confirm this is intentional.
+			 */
+			if (elength < sizeof(struct usb_cdc_mbim_extended_desc))
+				break;
+			hdr->usb_cdc_mbim_extended_desc =
+				(struct usb_cdc_mbim_extended_desc *)buffer;
+			break;
+		case CDC_PHONET_MAGIC_NUMBER:
+			hdr->phonet_magic_present = true;
+			break;
+		default:
+			/*
+			 * there are LOTS more CDC descriptors that
+			 * could legitimately be found here.
+			 */
+			dev_dbg(&intf->dev, "Ignoring descriptor: type %02x, length %u\n",
+					buffer[2], elength);
+			goto next_desc;
+		}
+		cnt++;
+next_desc:
+		buflen -= elength;
+		buffer += elength;
+	}
+	hdr->usb_cdc_union_desc = union_header;
+	hdr->usb_cdc_header_desc = header;
+	hdr->usb_cdc_mdlm_detail_desc = detail;
+	hdr->usb_cdc_mdlm_desc = desc;
+	hdr->usb_cdc_ether_desc = ether;
+	return cnt;
+}
+EXPORT_SYMBOL(cdc_parse_cdc_header);
index 944a6dca0fcb50298ce1b0c21f9f7cd36c1707ca..d2e50a27140c9254be2a80b6c6ae69bc71a93b4a 100644 (file)
@@ -128,6 +128,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x04f3, 0x016f), .driver_info =
                        USB_QUIRK_DEVICE_QUALIFIER },
 
+       { USB_DEVICE(0x04f3, 0x0381), .driver_info =
+                       USB_QUIRK_NO_LPM },
+
        { USB_DEVICE(0x04f3, 0x21b8), .driver_info =
                        USB_QUIRK_DEVICE_QUALIFIER },
 
index c1f29caa8990af6e4b9440a5bacb19d0313ced78..e838701d6dd54e624c356171833b929889027c38 100644 (file)
@@ -55,6 +55,7 @@ endchoice
 config USB_DWC2_PCI
        tristate "DWC2 PCI"
        depends on PCI
+       depends on USB_GADGET || !USB_GADGET
        default n
        select NOP_USB_XCEIV
        help
index dec0b21fc62687d700ffc3b040ec16a70a530710..9fae0291cd69f8c3314f547eeb9d0557af517e49 100644 (file)
@@ -166,7 +166,7 @@ struct dwc2_hsotg_req;
  *          means that it is sending data to the Host.
  * @index: The index for the endpoint registers.
  * @mc: Multi Count - number of transactions per microframe
- * @interval - Interval for periodic endpoints
+ * @interval: Interval for periodic endpoints, in frames or microframes.
  * @name: The name array passed to the USB core.
  * @halted: Set if the endpoint has been halted.
  * @periodic: Set if this is a periodic ep, such as Interrupt
@@ -177,6 +177,8 @@ struct dwc2_hsotg_req;
  * @fifo_load: The amount of data loaded into the FIFO (periodic IN)
  * @last_load: The offset of data for the last start of request.
  * @size_loaded: The last loaded size for DxEPTSIZE for periodic IN
+ * @target_frame: Targeted frame num to setup next ISOC transfer
+ * @frame_overrun: Indicates SOF number overrun in DSTS
  *
  * This is the driver's state for each registered enpoint, allowing it
  * to keep track of transactions that need doing. Each endpoint has a
@@ -213,7 +215,9 @@ struct dwc2_hsotg_ep {
        unsigned int            periodic:1;
        unsigned int            isochronous:1;
        unsigned int            send_zlp:1;
-       unsigned int            has_correct_parity:1;
+       unsigned int            target_frame;
+#define TARGET_FRAME_INITIAL   0xFFFFFFFF
+       bool                    frame_overrun;
 
        char                    name[10];
 };
index 26cf09d0fe3ce330ed65ac8dfa3dc4c38d809769..af46adfae41ca5fd92b1401eff078e84bf27c7aa 100644 (file)
@@ -96,6 +96,25 @@ static inline bool using_dma(struct dwc2_hsotg *hsotg)
        return hsotg->g_using_dma;
 }
 
+/**
+ * dwc2_gadget_incr_frame_num - Increments the targeted frame number.
+ * @hs_ep: The endpoint
+ *
+ * Advances target_frame by the endpoint's own interval (there is no
+ * separate increment argument).
+ * This function will also check if the frame number overruns DSTS_SOFFN_LIMIT.
+ * If an overrun occurs it will wrap the value and set the frame_overrun flag.
+ */
+static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
+{
+       hs_ep->target_frame += hs_ep->interval;
+       if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
+               hs_ep->frame_overrun = 1;
+               hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
+       } else {
+               hs_ep->frame_overrun = 0;
+       }
+}
+
 /**
  * dwc2_hsotg_en_gsint - enable one or more of the general interrupt
  * @hsotg: The device state
@@ -503,6 +522,23 @@ static unsigned get_ep_limit(struct dwc2_hsotg_ep *hs_ep)
        return maxsize;
 }
 
+/**
+ * dwc2_hsotg_read_frameno - read current frame number
+ * @hsotg: The device instance
+ *
+ * Return: the current (micro)frame number from the DSTS SOFFN field.
+ */
+static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
+{
+       u32 dsts;
+
+       dsts = dwc2_readl(hsotg->regs + DSTS);
+       dsts &= DSTS_SOFFN_MASK;
+       dsts >>= DSTS_SOFFN_SHIFT;
+
+       return dsts;
+}
+
 /**
  * dwc2_hsotg_start_req - start a USB request from an endpoint's queue
  * @hsotg: The controller state.
@@ -631,8 +667,17 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
                        __func__, &ureq->dma, dma_reg);
        }
 
+       if (hs_ep->isochronous && hs_ep->interval == 1) {
+               hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+               dwc2_gadget_incr_frame_num(hs_ep);
+
+               if (hs_ep->target_frame & 0x1)
+                       ctrl |= DXEPCTL_SETODDFR;
+               else
+                       ctrl |= DXEPCTL_SETEVENFR;
+       }
+
        ctrl |= DXEPCTL_EPENA;  /* ensure ep enabled */
-       ctrl |= DXEPCTL_USBACTEP;
 
        dev_dbg(hsotg->dev, "ep0 state:%d\n", hsotg->ep0_state);
 
@@ -658,14 +703,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
                dwc2_hsotg_write_fifo(hsotg, hs_ep, hs_req);
        }
 
-       /*
-        * clear the INTknTXFEmpMsk when we start request, more as a aide
-        * to debugging to see what is going on.
-        */
-       if (dir_in)
-               dwc2_writel(DIEPMSK_INTKNTXFEMPMSK,
-                      hsotg->regs + DIEPINT(index));
-
        /*
         * Note, trying to clear the NAK here causes problems with transmit
         * on the S3C6400 ending up with the TXFIFO becoming full.
@@ -773,6 +810,30 @@ static void dwc2_hsotg_handle_unaligned_buf_complete(struct dwc2_hsotg *hsotg,
        hs_req->saved_req_buf = NULL;
 }
 
+/**
+ * dwc2_gadget_target_frame_elapsed - Checks target frame
+ * @hs_ep: The driver endpoint to check
+ *
+ * Returns true if the targeted frame has already elapsed, meaning the
+ * corresponding transfer must be dropped. When the target frame has
+ * wrapped (frame_overrun set), only treat it as elapsed while the current
+ * frame is within half the SOFFN range past the target, to distinguish a
+ * genuinely late frame from one that simply has not wrapped yet.
+ */
+static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
+{
+       struct dwc2_hsotg *hsotg = hs_ep->parent;
+       u32 target_frame = hs_ep->target_frame;
+       u32 current_frame = dwc2_hsotg_read_frameno(hsotg);
+       bool frame_overrun = hs_ep->frame_overrun;
+
+       if (!frame_overrun && current_frame >= target_frame)
+               return true;
+
+       if (frame_overrun && current_frame >= target_frame &&
+           ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
+               return true;
+
+       return false;
+}
+
 static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
                              gfp_t gfp_flags)
 {
@@ -812,9 +873,18 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
        first = list_empty(&hs_ep->queue);
        list_add_tail(&hs_req->queue, &hs_ep->queue);
 
-       if (first)
-               dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+       if (first) {
+               if (!hs_ep->isochronous) {
+                       dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+                       return 0;
+               }
+
+               while (dwc2_gadget_target_frame_elapsed(hs_ep))
+                       dwc2_gadget_incr_frame_num(hs_ep);
 
+               if (hs_ep->target_frame != TARGET_FRAME_INITIAL)
+                       dwc2_hsotg_start_req(hs, hs_ep, hs_req, false);
+       }
        return 0;
 }
 
@@ -1034,6 +1104,42 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
        return list_first_entry(&hs_ep->queue, struct dwc2_hsotg_req, queue);
 }
 
+/**
+ * dwc2_gadget_start_next_request - Starts next request from ep queue
+ * @hs_ep: Endpoint structure
+ *
+ * If queue is empty and EP is ISOC-OUT - unmasks OUTTKNEPDIS which is masked
+ * in its handler. Hence we need to unmask it here to be able to do
+ * resynchronization.
+ */
+static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
+{
+       u32 mask;
+       struct dwc2_hsotg *hsotg = hs_ep->parent;
+       int dir_in = hs_ep->dir_in;
+       struct dwc2_hsotg_req *hs_req;
+       u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
+
+       /* queued work takes priority; just start the head request */
+       if (!list_empty(&hs_ep->queue)) {
+               hs_req = get_ep_head(hs_ep);
+               dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
+               return;
+       }
+       if (!hs_ep->isochronous)
+               return;
+
+       if (dir_in) {
+               dev_dbg(hsotg->dev, "%s: No more ISOC-IN requests\n",
+                       __func__);
+       } else {
+               dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
+                       __func__);
+               /* re-enable OUTTKNEPDIS so the next OUT token resyncs us */
+               mask = dwc2_readl(hsotg->regs + epmsk_reg);
+               mask |= DOEPMSK_OUTTKNEPDISMSK;
+               dwc2_writel(mask, hsotg->regs + epmsk_reg);
+       }
+}
+
 /**
  * dwc2_hsotg_process_req_feature - process request {SET,CLEAR}_FEATURE
  * @hsotg: The device state
@@ -1044,7 +1150,6 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
 {
        struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
        struct dwc2_hsotg_req *hs_req;
-       bool restart;
        bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
        struct dwc2_hsotg_ep *ep;
        int ret;
@@ -1127,12 +1232,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
 
                                /* If we have pending request, then start it */
                                if (!ep->req) {
-                                       restart = !list_empty(&ep->queue);
-                                       if (restart) {
-                                               hs_req = get_ep_head(ep);
-                                               dwc2_hsotg_start_req(hsotg, ep,
-                                                               hs_req, false);
-                                       }
+                                       dwc2_gadget_start_next_request(ep);
                                }
                        }
 
@@ -1373,7 +1473,6 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
                                       struct dwc2_hsotg_req *hs_req,
                                       int result)
 {
-       bool restart;
 
        if (!hs_req) {
                dev_dbg(hsotg->dev, "%s: nothing to complete?\n", __func__);
@@ -1417,11 +1516,7 @@ static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
         */
 
        if (!hs_ep->req && result >= 0) {
-               restart = !list_empty(&hs_ep->queue);
-               if (restart) {
-                       hs_req = get_ep_head(hs_ep);
-                       dwc2_hsotg_start_req(hsotg, hs_ep, hs_req, false);
-               }
+               dwc2_gadget_start_next_request(hs_ep);
        }
 }
 
@@ -1597,31 +1692,15 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
         * adjust the ISOC parity here.
         */
        if (!using_dma(hsotg)) {
-               hs_ep->has_correct_parity = 1;
                if (hs_ep->isochronous && hs_ep->interval == 1)
                        dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
+               else if (hs_ep->isochronous && hs_ep->interval > 1)
+                       dwc2_gadget_incr_frame_num(hs_ep);
        }
 
        dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
 }
 
-/**
- * dwc2_hsotg_read_frameno - read current frame number
- * @hsotg: The device instance
- *
- * Return the current frame number
- */
-static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg)
-{
-       u32 dsts;
-
-       dsts = dwc2_readl(hsotg->regs + DSTS);
-       dsts &= DSTS_SOFFN_MASK;
-       dsts >>= DSTS_SOFFN_SHIFT;
-
-       return dsts;
-}
-
 /**
  * dwc2_hsotg_handle_rx - RX FIFO has data
  * @hsotg: The device instance
@@ -1936,6 +2015,190 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
        dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
 }
 
+/**
+ * dwc2_gadget_read_ep_interrupts - reads interrupts for given ep
+ * @hsotg: The device state.
+ * @idx: Index of ep.
+ * @dir_in: Endpoint direction 1-in 0-out.
+ *
+ * Reads interrupts for the endpoint with the given index and direction,
+ * by masking epint_reg with the corresponding mask. The per-endpoint
+ * TxFIFO-empty enable bit from DIEPEMPMSK is folded into the mask, and
+ * SETUP_RCVD is always allowed through.
+ */
+static u32 dwc2_gadget_read_ep_interrupts(struct dwc2_hsotg *hsotg,
+                                         unsigned int idx, int dir_in)
+{
+       u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
+       u32 epint_reg = dir_in ? DIEPINT(idx) : DOEPINT(idx);
+       u32 ints;
+       u32 mask;
+       u32 diepempmsk;
+
+       mask = dwc2_readl(hsotg->regs + epmsk_reg);
+       diepempmsk = dwc2_readl(hsotg->regs + DIEPEMPMSK);
+       mask |= ((diepempmsk >> idx) & 0x1) ? DIEPMSK_TXFIFOEMPTY : 0;
+       mask |= DXEPINT_SETUP_RCVD;
+
+       ints = dwc2_readl(hsotg->regs + epint_reg);
+       ints &= mask;
+       return ints;
+}
+
+/**
+ * dwc2_gadget_handle_ep_disabled - handle DXEPINT_EPDISBLD
+ * @hs_ep: The endpoint on which interrupt is asserted.
+ *
+ * This interrupt indicates that the endpoint has been disabled per the
+ * application's request.
+ *
+ * For IN endpoints flushes txfifo, in case of BULK clears DCTL_CGNPINNAK,
+ * in case of ISOC completes current request.
+ *
+ * For ISOC-OUT endpoints completes expired requests. If there is remaining
+ * request starts it.
+ */
+static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
+{
+       struct dwc2_hsotg *hsotg = hs_ep->parent;
+       struct dwc2_hsotg_req *hs_req;
+       unsigned char idx = hs_ep->index;
+       int dir_in = hs_ep->dir_in;
+       u32 epctl_reg = dir_in ? DIEPCTL(idx) : DOEPCTL(idx);
+       int dctl = dwc2_readl(hsotg->regs + DCTL);
+
+       dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+
+       if (dir_in) {
+               int epctl = dwc2_readl(hsotg->regs + epctl_reg);
+
+               dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
+
+               if (hs_ep->isochronous) {
+                       dwc2_hsotg_complete_in(hsotg, hs_ep);
+                       return;
+               }
+
+               if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
+                       /*
+                        * NOTE(review): this inner dctl shadows the one read
+                        * at function entry - presumably to get a fresh DCTL
+                        * snapshot here; confirm the shadowing is intended.
+                        */
+                       int dctl = dwc2_readl(hsotg->regs + DCTL);
+
+                       dctl |= DCTL_CGNPINNAK;
+                       dwc2_writel(dctl, hsotg->regs + DCTL);
+               }
+               return;
+       }
+
+       /* OUT direction: clear global OUT NAK if it is in effect */
+       if (dctl & DCTL_GOUTNAKSTS) {
+               dctl |= DCTL_CGOUTNAK;
+               dwc2_writel(dctl, hsotg->regs + DCTL);
+       }
+
+       if (!hs_ep->isochronous)
+               return;
+
+       if (list_empty(&hs_ep->queue)) {
+               dev_dbg(hsotg->dev, "%s: complete_ep 0x%p, ep->queue empty!\n",
+                       __func__, hs_ep);
+               return;
+       }
+
+       /* drop every queued request whose target frame already passed */
+       do {
+               hs_req = get_ep_head(hs_ep);
+               if (hs_req)
+                       dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req,
+                                                   -ENODATA);
+               dwc2_gadget_incr_frame_num(hs_ep);
+       } while (dwc2_gadget_target_frame_elapsed(hs_ep));
+
+       dwc2_gadget_start_next_request(hs_ep);
+}
+
+/**
+ * dwc2_gadget_handle_out_token_ep_disabled - handle DXEPINT_OUTTKNEPDIS
+ * @ep: The endpoint on which interrupt is asserted.
+ *
+ * This is starting point for ISOC-OUT transfer, synchronization done with
+ * first out token received from host while corresponding EP is disabled.
+ *
+ * Device does not know initial frame in which out token will come. For this
+ * HW generates OUTTKNEPDIS - out token is received while EP is disabled. Upon
+ * getting this interrupt SW starts calculation for next transfer frame.
+ */
+static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
+{
+       struct dwc2_hsotg *hsotg = ep->parent;
+       int dir_in = ep->dir_in;
+       u32 doepmsk;
+
+       if (dir_in || !ep->isochronous)
+               return;
+
+       /* the request this OUT token belonged to arrived too late */
+       dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), -ENODATA);
+
+       if (ep->interval > 1 &&
+           ep->target_frame == TARGET_FRAME_INITIAL) {
+               u32 ctrl;
+
+               /* first OUT token: latch the frame number and set parity */
+               ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+               dwc2_gadget_incr_frame_num(ep);
+
+               ctrl = dwc2_readl(hsotg->regs + DOEPCTL(ep->index));
+               if (ep->target_frame & 0x1)
+                       ctrl |= DXEPCTL_SETODDFR;
+               else
+                       ctrl |= DXEPCTL_SETEVENFR;
+
+               dwc2_writel(ctrl, hsotg->regs + DOEPCTL(ep->index));
+       }
+
+       dwc2_gadget_start_next_request(ep);
+       /* mask OUTTKNEPDIS again until the queue next runs dry */
+       doepmsk = dwc2_readl(hsotg->regs + DOEPMSK);
+       doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
+       dwc2_writel(doepmsk, hsotg->regs + DOEPMSK);
+}
+
+/**
+ * dwc2_gadget_handle_nak - handle NAK interrupt
+ * @hs_ep: The endpoint on which interrupt is asserted.
+ *
+ * This is starting point for ISOC-IN transfer, synchronization done with
+ * first IN token received from host while corresponding EP is disabled.
+ *
+ * Device does not know when first one token will arrive from host. On first
+ * token arrival HW generates 2 interrupts: 'in token received while FIFO
+ * empty' and 'NAK'. NAK interrupt for ISOC-IN means that token has arrived
+ * and ZLP was sent in response to that as there was no data in FIFO. SW
+ * relies on this interrupt to obtain the frame in which the token came and
+ * then, based on the interval, calculates the next frame for transfer.
+ */
+static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
+{
+       struct dwc2_hsotg *hsotg = hs_ep->parent;
+       int dir_in = hs_ep->dir_in;
+
+       if (!dir_in || !hs_ep->isochronous)
+               return;
+
+       if (hs_ep->target_frame == TARGET_FRAME_INITIAL) {
+               hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
+               if (hs_ep->interval > 1) {
+                       u32 ctrl = dwc2_readl(hsotg->regs +
+                                             DIEPCTL(hs_ep->index));
+                       /* set even/odd frame parity for the first transfer */
+                       if (hs_ep->target_frame & 0x1)
+                               ctrl |= DXEPCTL_SETODDFR;
+                       else
+                               ctrl |= DXEPCTL_SETEVENFR;
+
+                       dwc2_writel(ctrl, hsotg->regs + DIEPCTL(hs_ep->index));
+               }
+
+               /* complete the ZLP that was sent in response to the token */
+               dwc2_hsotg_complete_request(hsotg, hs_ep,
+                                           get_ep_head(hs_ep), 0);
+       }
+
+       dwc2_gadget_incr_frame_num(hs_ep);
+}
+
 /**
  * dwc2_hsotg_epint - handle an in/out endpoint interrupt
  * @hsotg: The driver state
@@ -1954,7 +2217,7 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
        u32 ints;
        u32 ctrl;
 
-       ints = dwc2_readl(hsotg->regs + epint_reg);
+       ints = dwc2_gadget_read_ep_interrupts(hsotg, idx, dir_in);
        ctrl = dwc2_readl(hsotg->regs + epctl_reg);
 
        /* Clear endpoint interrupts */
@@ -1973,11 +2236,10 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
        if (idx == 0 && (ints & (DXEPINT_SETUP | DXEPINT_SETUP_RCVD)))
                ints &= ~DXEPINT_XFERCOMPL;
 
-       if (ints & DXEPINT_XFERCOMPL) {
-               hs_ep->has_correct_parity = 1;
-               if (hs_ep->isochronous && hs_ep->interval == 1)
-                       dwc2_hsotg_change_ep_iso_parity(hsotg, epctl_reg);
+       if (ints & DXEPINT_STSPHSERCVD)
+               dev_dbg(hsotg->dev, "%s: StsPhseRcvd asserted\n", __func__);
 
+       if (ints & DXEPINT_XFERCOMPL) {
                dev_dbg(hsotg->dev,
                        "%s: XferCompl: DxEPCTL=0x%08x, DXEPTSIZ=%08x\n",
                        __func__, dwc2_readl(hsotg->regs + epctl_reg),
@@ -1988,7 +2250,12 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
                 * at completing IN requests here
                 */
                if (dir_in) {
+                       if (hs_ep->isochronous && hs_ep->interval > 1)
+                               dwc2_gadget_incr_frame_num(hs_ep);
+
                        dwc2_hsotg_complete_in(hsotg, hs_ep);
+                       if (ints & DXEPINT_NAKINTRPT)
+                               ints &= ~DXEPINT_NAKINTRPT;
 
                        if (idx == 0 && !hs_ep->req)
                                dwc2_hsotg_enqueue_setup(hsotg);
@@ -1997,28 +2264,21 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
                         * We're using DMA, we need to fire an OutDone here
                         * as we ignore the RXFIFO.
                         */
+                       if (hs_ep->isochronous && hs_ep->interval > 1)
+                               dwc2_gadget_incr_frame_num(hs_ep);
 
                        dwc2_hsotg_handle_outdone(hsotg, idx);
                }
        }
 
-       if (ints & DXEPINT_EPDISBLD) {
-               dev_dbg(hsotg->dev, "%s: EPDisbld\n", __func__);
+       if (ints & DXEPINT_EPDISBLD)
+               dwc2_gadget_handle_ep_disabled(hs_ep);
 
-               if (dir_in) {
-                       int epctl = dwc2_readl(hsotg->regs + epctl_reg);
+       if (ints & DXEPINT_OUTTKNEPDIS)
+               dwc2_gadget_handle_out_token_ep_disabled(hs_ep);
 
-                       dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
-
-                       if ((epctl & DXEPCTL_STALL) &&
-                               (epctl & DXEPCTL_EPTYPE_BULK)) {
-                               int dctl = dwc2_readl(hsotg->regs + DCTL);
-
-                               dctl |= DCTL_CGNPINNAK;
-                               dwc2_writel(dctl, hsotg->regs + DCTL);
-                       }
-               }
-       }
+       if (ints & DXEPINT_NAKINTRPT)
+               dwc2_gadget_handle_nak(hs_ep);
 
        if (ints & DXEPINT_AHBERR)
                dev_dbg(hsotg->dev, "%s: AHBErr\n", __func__);
@@ -2046,20 +2306,20 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
 
        if (dir_in && !hs_ep->isochronous) {
                /* not sure if this is important, but we'll clear it anyway */
-               if (ints & DIEPMSK_INTKNTXFEMPMSK) {
+               if (ints & DXEPINT_INTKNTXFEMP) {
                        dev_dbg(hsotg->dev, "%s: ep%d: INTknTXFEmpMsk\n",
                                __func__, idx);
                }
 
                /* this probably means something bad is happening */
-               if (ints & DIEPMSK_INTKNEPMISMSK) {
+               if (ints & DXEPINT_INTKNEPMIS) {
                        dev_warn(hsotg->dev, "%s: ep%d: INTknEP\n",
                                 __func__, idx);
                }
 
                /* FIFO has space or is empty (see GAHBCFG) */
                if (hsotg->dedicated_fifos &&
-                   ints & DIEPMSK_TXFIFOEMPTY) {
+                   ints & DXEPINT_TXFEMP) {
                        dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n",
                                __func__, idx);
                        if (!using_dma(hsotg))
@@ -2322,18 +2582,16 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
        dwc2_writel(((hsotg->dedicated_fifos && !using_dma(hsotg)) ?
                DIEPMSK_TXFIFOEMPTY | DIEPMSK_INTKNTXFEMPMSK : 0) |
                DIEPMSK_EPDISBLDMSK | DIEPMSK_XFERCOMPLMSK |
-               DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK |
-               DIEPMSK_INTKNEPMISMSK,
+               DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK,
                hsotg->regs + DIEPMSK);
 
        /*
         * don't need XferCompl, we get that from RXFIFO in slave mode. In
         * DMA mode we may need this.
         */
-       dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK |
-                                   DIEPMSK_TIMEOUTMSK) : 0) |
+       dwc2_writel((using_dma(hsotg) ? (DIEPMSK_XFERCOMPLMSK) : 0) |
                DOEPMSK_EPDISBLDMSK | DOEPMSK_AHBERRMSK |
-               DOEPMSK_SETUPMSK,
+               DOEPMSK_SETUPMSK | DOEPMSK_STSPHSERCVDMSK,
                hsotg->regs + DOEPMSK);
 
        dwc2_writel(0, hsotg->regs + DAINTMSK);
@@ -2413,6 +2671,85 @@ void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg)
        __bic32(hsotg->regs + DCTL, DCTL_SFTDISCON);
 }
 
+/**
+ * dwc2_gadget_handle_incomplete_isoc_in - handle incomplete ISO IN Interrupt.
+ * @hsotg: The device state
+ *
+ * This interrupt indicates one of the following conditions occurred while
+ * transmitting an ISOC transaction.
+ * - Corrupted IN Token for ISOC EP.
+ * - Packet not complete in FIFO.
+ *
+ * The following actions will be taken:
+ * - Determine the EP
+ * - Disable EP; when 'Endpoint Disabled' interrupt is received Flush FIFO
+ */
+static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
+{
+       struct dwc2_hsotg_ep *hs_ep;
+       u32 epctrl;
+       u32 idx;
+
+       dev_dbg(hsotg->dev, "Incomplete isoc in interrupt received:\n");
+
+       for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
+               hs_ep = hsotg->eps_in[idx];
+               epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
+               if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous &&
+                   dwc2_gadget_target_frame_elapsed(hs_ep)) {
+                       epctrl |= DXEPCTL_SNAK;
+                       epctrl |= DXEPCTL_EPDIS;
+                       dwc2_writel(epctrl, hsotg->regs + DIEPCTL(idx));
+               }
+       }
+
+       /* Clear interrupt */
+       dwc2_writel(GINTSTS_INCOMPL_SOIN, hsotg->regs + GINTSTS);
+}
+
+/**
+ * dwc2_gadget_handle_incomplete_isoc_out - handle incomplete ISO OUT Interrupt
+ * @hsotg: The device state
+ *
+ * This interrupt indicates one of the following conditions occurred while
+ * receiving an ISOC transaction.
+ * - Corrupted OUT Token for ISOC EP.
+ * - Packet not complete in FIFO.
+ *
+ * The following actions will be taken:
+ * - Determine the EP
+ * - Set DCTL_SGOUTNAK and unmask GOUTNAKEFF if target frame elapsed.
+ */
+static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
+{
+       u32 gintsts;
+       u32 gintmsk;
+       u32 epctrl;
+       struct dwc2_hsotg_ep *hs_ep;
+       int idx;
+
+       dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
+
+       for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
+               hs_ep = hsotg->eps_out[idx];
+               epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
+               if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous &&
+                   dwc2_gadget_target_frame_elapsed(hs_ep)) {
+                       /* Unmask GOUTNAKEFF interrupt */
+                       gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+                       gintmsk |= GINTSTS_GOUTNAKEFF;
+                       dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
+
+                       gintsts = dwc2_readl(hsotg->regs + GINTSTS);
+                       if (!(gintsts & GINTSTS_GOUTNAKEFF))
+                               __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+               }
+       }
+
+       /* Clear interrupt */
+       dwc2_writel(GINTSTS_INCOMPL_SOOUT, hsotg->regs + GINTSTS);
+}
+
 /**
  * dwc2_hsotg_irq - handle device interrupt
  * @irq: The IRQ number triggered
@@ -2545,11 +2882,29 @@ irq_retry:
         */
 
        if (gintsts & GINTSTS_GOUTNAKEFF) {
-               dev_info(hsotg->dev, "GOUTNakEff triggered\n");
+               u8 idx;
+               u32 epctrl;
+               u32 gintmsk;
+               struct dwc2_hsotg_ep *hs_ep;
 
-               __orr32(hsotg->regs + DCTL, DCTL_CGOUTNAK);
+               /* Mask this interrupt */
+               gintmsk = dwc2_readl(hsotg->regs + GINTMSK);
+               gintmsk &= ~GINTSTS_GOUTNAKEFF;
+               dwc2_writel(gintmsk, hsotg->regs + GINTMSK);
 
-               dwc2_hsotg_dump(hsotg);
+               dev_dbg(hsotg->dev, "GOUTNakEff triggered\n");
+               for (idx = 1; idx <= hsotg->num_of_eps; idx++) {
+                       hs_ep = hsotg->eps_out[idx];
+                       epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
+
+                       if ((epctrl & DXEPCTL_EPENA) && hs_ep->isochronous) {
+                               epctrl |= DXEPCTL_SNAK;
+                               epctrl |= DXEPCTL_EPDIS;
+                               dwc2_writel(epctrl, hsotg->regs + DOEPCTL(idx));
+                       }
+               }
+
+               /* This interrupt bit is cleared in DXEPINT_EPDISBLD handler */
        }
 
        if (gintsts & GINTSTS_GINNAKEFF) {
@@ -2560,39 +2915,11 @@ irq_retry:
                dwc2_hsotg_dump(hsotg);
        }
 
-       if (gintsts & GINTSTS_INCOMPL_SOIN) {
-               u32 idx, epctl_reg;
-               struct dwc2_hsotg_ep *hs_ep;
-
-               dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOIN\n", __func__);
-               for (idx = 1; idx < hsotg->num_of_eps; idx++) {
-                       hs_ep = hsotg->eps_in[idx];
-
-                       if (!hs_ep->isochronous || hs_ep->has_correct_parity)
-                               continue;
+       if (gintsts & GINTSTS_INCOMPL_SOIN)
+               dwc2_gadget_handle_incomplete_isoc_in(hsotg);
 
-                       epctl_reg = DIEPCTL(idx);
-                       dwc2_hsotg_change_ep_iso_parity(hsotg, epctl_reg);
-               }
-               dwc2_writel(GINTSTS_INCOMPL_SOIN, hsotg->regs + GINTSTS);
-       }
-
-       if (gintsts & GINTSTS_INCOMPL_SOOUT) {
-               u32 idx, epctl_reg;
-               struct dwc2_hsotg_ep *hs_ep;
-
-               dev_dbg(hsotg->dev, "%s: GINTSTS_INCOMPL_SOOUT\n", __func__);
-               for (idx = 1; idx < hsotg->num_of_eps; idx++) {
-                       hs_ep = hsotg->eps_out[idx];
-
-                       if (!hs_ep->isochronous || hs_ep->has_correct_parity)
-                               continue;
-
-                       epctl_reg = DOEPCTL(idx);
-                       dwc2_hsotg_change_ep_iso_parity(hsotg, epctl_reg);
-               }
-               dwc2_writel(GINTSTS_INCOMPL_SOOUT, hsotg->regs + GINTSTS);
-       }
+       if (gintsts & GINTSTS_INCOMPL_SOOUT)
+               dwc2_gadget_handle_incomplete_isoc_out(hsotg);
 
        /*
         * if we've had fifo events, we should try and go around the
@@ -2624,6 +2951,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
        u32 epctrl_reg;
        u32 epctrl;
        u32 mps;
+       u32 mask;
        unsigned int dir_in;
        unsigned int i, val, size;
        int ret = 0;
@@ -2666,15 +2994,6 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
         */
        epctrl |= DXEPCTL_USBACTEP;
 
-       /*
-        * set the NAK status on the endpoint, otherwise we might try and
-        * do something with data that we've yet got a request to process
-        * since the RXFIFO will take data for an endpoint even if the
-        * size register hasn't been set.
-        */
-
-       epctrl |= DXEPCTL_SNAK;
-
        /* update the endpoint state */
        dwc2_hsotg_set_ep_maxpacket(hsotg, hs_ep->index, mps, dir_in);
 
@@ -2683,18 +3002,24 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
        hs_ep->periodic = 0;
        hs_ep->halted = 0;
        hs_ep->interval = desc->bInterval;
-       hs_ep->has_correct_parity = 0;
-
-       if (hs_ep->interval > 1 && hs_ep->mc > 1)
-               dev_err(hsotg->dev, "MC > 1 when interval is not 1\n");
 
        switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
        case USB_ENDPOINT_XFER_ISOC:
                epctrl |= DXEPCTL_EPTYPE_ISO;
                epctrl |= DXEPCTL_SETEVENFR;
                hs_ep->isochronous = 1;
-               if (dir_in)
+               hs_ep->interval = 1 << (desc->bInterval - 1);
+               hs_ep->target_frame = TARGET_FRAME_INITIAL;
+               if (dir_in) {
                        hs_ep->periodic = 1;
+                       mask = dwc2_readl(hsotg->regs + DIEPMSK);
+                       mask |= DIEPMSK_NAKMSK;
+                       dwc2_writel(mask, hsotg->regs + DIEPMSK);
+               } else {
+                       mask = dwc2_readl(hsotg->regs + DOEPMSK);
+                       mask |= DOEPMSK_OUTTKNEPDISMSK;
+                       dwc2_writel(mask, hsotg->regs + DOEPMSK);
+               }
                break;
 
        case USB_ENDPOINT_XFER_BULK:
@@ -2705,6 +3030,9 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
                if (dir_in)
                        hs_ep->periodic = 1;
 
+               if (hsotg->gadget.speed == USB_SPEED_HIGH)
+                       hs_ep->interval = 1 << (desc->bInterval - 1);
+
                epctrl |= DXEPCTL_EPTYPE_INTERRUPT;
                break;
 
@@ -2758,7 +3086,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
        }
 
        /* for non control endpoints, set PID to D0 */
-       if (index)
+       if (index && !hs_ep->isochronous)
                epctrl |= DXEPCTL_SETD0PID;
 
        dev_dbg(hsotg->dev, "%s: write DxEPCTL=0x%08x\n",
@@ -2875,10 +3203,8 @@ static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
                        dev_warn(hsotg->dev,
                                "%s: timeout DIEPINT.NAKEFF\n", __func__);
        } else {
-               /* Clear any pending nak effect interrupt */
-               dwc2_writel(GINTSTS_GOUTNAKEFF, hsotg->regs + GINTSTS);
-
-               __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
+               if (!(dwc2_readl(hsotg->regs + GINTSTS) & GINTSTS_GOUTNAKEFF))
+                       __orr32(hsotg->regs + DCTL, DCTL_SGOUTNAK);
 
                /* Wait for global nak to take effect */
                if (dwc2_hsotg_wait_bit_set(hsotg, GINTSTS,
index b5c7793a2df2b1cdd7623f278d7faa161812de01..13754353251f1a2252a1cf743d6c19523859c478 100644 (file)
@@ -367,7 +367,8 @@ static void pmap_unschedule(unsigned long *map, int bits_per_period,
  * @fmt:   The format for printf.
  * @...:   The args for printf.
  */
-static void cat_printf(char **buf, size_t *size, const char *fmt, ...)
+static __printf(3, 4)
+void cat_printf(char **buf, size_t *size, const char *fmt, ...)
 {
        va_list args;
        int i;
index 281b57b36ab4444fd81bd6e4ae0e0fad3e4c29c2..efc3bcde2822aa59a9e59350e632c70a0a2f8841 100644 (file)
 #define DSTS_SUSPSTS                   (1 << 0)
 
 #define DIEPMSK                                HSOTG_REG(0x810)
+#define DIEPMSK_NAKMSK                 (1 << 13)
+#define DIEPMSK_BNAININTRMSK           (1 << 9)
+#define DIEPMSK_TXFIFOUNDRNMSK         (1 << 8)
 #define DIEPMSK_TXFIFOEMPTY            (1 << 7)
 #define DIEPMSK_INEPNAKEFFMSK          (1 << 6)
 #define DIEPMSK_INTKNEPMISMSK          (1 << 5)
 
 #define DOEPMSK                                HSOTG_REG(0x814)
 #define DOEPMSK_BACK2BACKSETUP         (1 << 6)
+#define DOEPMSK_STSPHSERCVDMSK         (1 << 5)
 #define DOEPMSK_OUTTKNEPDISMSK         (1 << 4)
 #define DOEPMSK_SETUPMSK               (1 << 3)
 #define DOEPMSK_AHBERRMSK              (1 << 2)
 #define DTKNQR2                                HSOTG_REG(0x824)
 #define DTKNQR3                                HSOTG_REG(0x830)
 #define DTKNQR4                                HSOTG_REG(0x834)
+#define DIEPEMPMSK                     HSOTG_REG(0x834)
 
 #define DVBUSDIS                       HSOTG_REG(0x828)
 #define DVBUSPULSE                     HSOTG_REG(0x82C)
 #define DIEPINT(_a)                    HSOTG_REG(0x908 + ((_a) * 0x20))
 #define DOEPINT(_a)                    HSOTG_REG(0xB08 + ((_a) * 0x20))
 #define DXEPINT_SETUP_RCVD             (1 << 15)
+#define DXEPINT_NYETINTRPT             (1 << 14)
+#define DXEPINT_NAKINTRPT              (1 << 13)
+#define DXEPINT_BBLEERRINTRPT          (1 << 12)
+#define DXEPINT_PKTDRPSTS              (1 << 11)
+#define DXEPINT_BNAINTR                        (1 << 9)
+#define DXEPINT_TXFIFOUNDRN            (1 << 8)
+#define DXEPINT_OUTPKTERR              (1 << 8)
+#define DXEPINT_TXFEMP                 (1 << 7)
 #define DXEPINT_INEPNAKEFF             (1 << 6)
 #define DXEPINT_BACK2BACKSETUP         (1 << 6)
 #define DXEPINT_INTKNEPMIS             (1 << 5)
+#define DXEPINT_STSPHSERCVD            (1 << 5)
 #define DXEPINT_INTKNTXFEMP            (1 << 4)
 #define DXEPINT_OUTTKNEPDIS            (1 << 4)
 #define DXEPINT_TIMEOUT                        (1 << 3)
index a590cd225bb75de935345f52af8494804c4dc60f..946643157b78988a5c45655d9afe55a91d960b3d 100644 (file)
 #include <linux/usb/of.h>
 #include <linux/usb/otg.h>
 
-#include "platform_data.h"
 #include "core.h"
 #include "gadget.h"
 #include "io.h"
 
 #include "debug.h"
 
-/* -------------------------------------------------------------------------- */
+#define DWC3_DEFAULT_AUTOSUSPEND_DELAY 5000 /* ms */
 
 void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
 {
@@ -149,9 +148,8 @@ static int dwc3_soft_reset(struct dwc3 *dwc)
 /*
  * dwc3_frame_length_adjustment - Adjusts frame length if required
  * @dwc3: Pointer to our controller context structure
- * @fladj: Value of GFLADJ_30MHZ to adjust frame length
  */
-static void dwc3_frame_length_adjustment(struct dwc3 *dwc, u32 fladj)
+static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
 {
        u32 reg;
        u32 dft;
@@ -159,15 +157,15 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc, u32 fladj)
        if (dwc->revision < DWC3_REVISION_250A)
                return;
 
-       if (fladj == 0)
+       if (dwc->fladj == 0)
                return;
 
        reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
        dft = reg & DWC3_GFLADJ_30MHZ_MASK;
-       if (!dev_WARN_ONCE(dwc->dev, dft == fladj,
+       if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj,
            "request value same as default, ignoring\n")) {
                reg &= ~DWC3_GFLADJ_30MHZ_MASK;
-               reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | fladj;
+               reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
                dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
        }
 }
@@ -507,6 +505,21 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
        return 0;
 }
 
+static void dwc3_core_exit(struct dwc3 *dwc)
+{
+       dwc3_event_buffers_cleanup(dwc);
+
+       usb_phy_shutdown(dwc->usb2_phy);
+       usb_phy_shutdown(dwc->usb3_phy);
+       phy_exit(dwc->usb2_generic_phy);
+       phy_exit(dwc->usb3_generic_phy);
+
+       usb_phy_set_suspend(dwc->usb2_phy, 1);
+       usb_phy_set_suspend(dwc->usb3_phy, 1);
+       phy_power_off(dwc->usb2_generic_phy);
+       phy_power_off(dwc->usb3_generic_phy);
+}
+
 /**
  * dwc3_core_init - Low-level initialization of DWC3 Core
  * @dwc: Pointer to our controller context structure
@@ -556,6 +569,10 @@ static int dwc3_core_init(struct dwc3 *dwc)
        if (ret)
                goto err0;
 
+       ret = dwc3_phy_setup(dwc);
+       if (ret)
+               goto err0;
+
        reg = dwc3_readl(dwc->regs, DWC3_GCTL);
        reg &= ~DWC3_GCTL_SCALEDOWN_MASK;
 
@@ -622,22 +639,45 @@ static int dwc3_core_init(struct dwc3 *dwc)
        if (dwc->revision < DWC3_REVISION_190A)
                reg |= DWC3_GCTL_U2RSTECN;
 
-       dwc3_core_num_eps(dwc);
-
        dwc3_writel(dwc->regs, DWC3_GCTL, reg);
 
-       ret = dwc3_alloc_scratch_buffers(dwc);
-       if (ret)
-               goto err1;
+       dwc3_core_num_eps(dwc);
 
        ret = dwc3_setup_scratch_buffers(dwc);
        if (ret)
+               goto err1;
+
+       /* Adjust Frame Length */
+       dwc3_frame_length_adjustment(dwc);
+
+       usb_phy_set_suspend(dwc->usb2_phy, 0);
+       usb_phy_set_suspend(dwc->usb3_phy, 0);
+       ret = phy_power_on(dwc->usb2_generic_phy);
+       if (ret < 0)
                goto err2;
 
+       ret = phy_power_on(dwc->usb3_generic_phy);
+       if (ret < 0)
+               goto err3;
+
+       ret = dwc3_event_buffers_setup(dwc);
+       if (ret) {
+               dev_err(dwc->dev, "failed to setup event buffers\n");
+               goto err4;
+       }
+
        return 0;
 
+err4:
+       phy_power_off(dwc->usb2_generic_phy);
+
+err3:
+       phy_power_off(dwc->usb3_generic_phy);
+
 err2:
-       dwc3_free_scratch_buffers(dwc);
+       usb_phy_set_suspend(dwc->usb2_phy, 1);
+       usb_phy_set_suspend(dwc->usb3_phy, 1);
+       dwc3_core_exit(dwc);
 
 err1:
        usb_phy_shutdown(dwc->usb2_phy);
@@ -649,15 +689,6 @@ err0:
        return ret;
 }
 
-static void dwc3_core_exit(struct dwc3 *dwc)
-{
-       dwc3_free_scratch_buffers(dwc);
-       usb_phy_shutdown(dwc->usb2_phy);
-       usb_phy_shutdown(dwc->usb3_phy);
-       phy_exit(dwc->usb2_generic_phy);
-       phy_exit(dwc->usb3_generic_phy);
-}
-
 static int dwc3_core_get_phy(struct dwc3 *dwc)
 {
        struct device           *dev = dwc->dev;
@@ -735,7 +766,8 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
                dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
                ret = dwc3_gadget_init(dwc);
                if (ret) {
-                       dev_err(dev, "failed to initialize gadget\n");
+                       if (ret != -EPROBE_DEFER)
+                               dev_err(dev, "failed to initialize gadget\n");
                        return ret;
                }
                break;
@@ -743,7 +775,8 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
                dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
                ret = dwc3_host_init(dwc);
                if (ret) {
-                       dev_err(dev, "failed to initialize host\n");
+                       if (ret != -EPROBE_DEFER)
+                               dev_err(dev, "failed to initialize host\n");
                        return ret;
                }
                break;
@@ -751,13 +784,15 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
                dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
                ret = dwc3_host_init(dwc);
                if (ret) {
-                       dev_err(dev, "failed to initialize host\n");
+                       if (ret != -EPROBE_DEFER)
+                               dev_err(dev, "failed to initialize host\n");
                        return ret;
                }
 
                ret = dwc3_gadget_init(dwc);
                if (ret) {
-                       dev_err(dev, "failed to initialize gadget\n");
+                       if (ret != -EPROBE_DEFER)
+                               dev_err(dev, "failed to initialize gadget\n");
                        return ret;
                }
                break;
@@ -793,13 +828,11 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
 static int dwc3_probe(struct platform_device *pdev)
 {
        struct device           *dev = &pdev->dev;
-       struct dwc3_platform_data *pdata = dev_get_platdata(dev);
        struct resource         *res;
        struct dwc3             *dwc;
        u8                      lpm_nyet_threshold;
        u8                      tx_de_emphasis;
        u8                      hird_threshold;
-       u32                     fladj = 0;
 
        int                     ret;
 
@@ -814,16 +847,6 @@ static int dwc3_probe(struct platform_device *pdev)
        dwc->mem = mem;
        dwc->dev = dev;
 
-       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!res) {
-               dev_err(dev, "missing IRQ\n");
-               return -ENODEV;
-       }
-       dwc->xhci_resources[1].start = res->start;
-       dwc->xhci_resources[1].end = res->end;
-       dwc->xhci_resources[1].flags = res->flags;
-       dwc->xhci_resources[1].name = res->name;
-
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(dev, "missing memory resource\n");
@@ -909,40 +932,7 @@ static int dwc3_probe(struct platform_device *pdev)
        device_property_read_string(dev, "snps,hsphy_interface",
                                    &dwc->hsphy_interface);
        device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
-                                &fladj);
-
-       if (pdata) {
-               dwc->maximum_speed = pdata->maximum_speed;
-               dwc->has_lpm_erratum = pdata->has_lpm_erratum;
-               if (pdata->lpm_nyet_threshold)
-                       lpm_nyet_threshold = pdata->lpm_nyet_threshold;
-               dwc->is_utmi_l1_suspend = pdata->is_utmi_l1_suspend;
-               if (pdata->hird_threshold)
-                       hird_threshold = pdata->hird_threshold;
-
-               dwc->usb3_lpm_capable = pdata->usb3_lpm_capable;
-               dwc->dr_mode = pdata->dr_mode;
-
-               dwc->disable_scramble_quirk = pdata->disable_scramble_quirk;
-               dwc->u2exit_lfps_quirk = pdata->u2exit_lfps_quirk;
-               dwc->u2ss_inp3_quirk = pdata->u2ss_inp3_quirk;
-               dwc->req_p1p2p3_quirk = pdata->req_p1p2p3_quirk;
-               dwc->del_p1p2p3_quirk = pdata->del_p1p2p3_quirk;
-               dwc->del_phy_power_chg_quirk = pdata->del_phy_power_chg_quirk;
-               dwc->lfps_filter_quirk = pdata->lfps_filter_quirk;
-               dwc->rx_detect_poll_quirk = pdata->rx_detect_poll_quirk;
-               dwc->dis_u3_susphy_quirk = pdata->dis_u3_susphy_quirk;
-               dwc->dis_u2_susphy_quirk = pdata->dis_u2_susphy_quirk;
-               dwc->dis_enblslpm_quirk = pdata->dis_enblslpm_quirk;
-               dwc->dis_rxdet_inp3_quirk = pdata->dis_rxdet_inp3_quirk;
-
-               dwc->tx_de_emphasis_quirk = pdata->tx_de_emphasis_quirk;
-               if (pdata->tx_de_emphasis)
-                       tx_de_emphasis = pdata->tx_de_emphasis;
-
-               dwc->hsphy_interface = pdata->hsphy_interface;
-               fladj = pdata->fladj_value;
-       }
+                                &dwc->fladj);
 
        dwc->lpm_nyet_threshold = lpm_nyet_threshold;
        dwc->tx_de_emphasis = tx_de_emphasis;
@@ -953,10 +943,6 @@ static int dwc3_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, dwc);
        dwc3_cache_hwparams(dwc);
 
-       ret = dwc3_phy_setup(dwc);
-       if (ret)
-               goto err0;
-
        ret = dwc3_core_get_phy(dwc);
        if (ret)
                goto err0;
@@ -969,29 +955,43 @@ static int dwc3_probe(struct platform_device *pdev)
                dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
        }
 
+       pm_runtime_set_active(dev);
+       pm_runtime_use_autosuspend(dev);
+       pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
        pm_runtime_enable(dev);
-       pm_runtime_get_sync(dev);
+       ret = pm_runtime_get_sync(dev);
+       if (ret < 0)
+               goto err1;
+
        pm_runtime_forbid(dev);
 
        ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
        if (ret) {
                dev_err(dwc->dev, "failed to allocate event buffers\n");
                ret = -ENOMEM;
-               goto err1;
+               goto err2;
        }
 
-       if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
+       if (IS_ENABLED(CONFIG_USB_DWC3_HOST) &&
+                       (dwc->dr_mode == USB_DR_MODE_OTG ||
+                                       dwc->dr_mode == USB_DR_MODE_UNKNOWN))
                dwc->dr_mode = USB_DR_MODE_HOST;
-       else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
+       else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET) &&
+                       (dwc->dr_mode == USB_DR_MODE_OTG ||
+                                       dwc->dr_mode == USB_DR_MODE_UNKNOWN))
                dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
 
        if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
                dwc->dr_mode = USB_DR_MODE_OTG;
 
+       ret = dwc3_alloc_scratch_buffers(dwc);
+       if (ret)
+               goto err3;
+
        ret = dwc3_core_init(dwc);
        if (ret) {
                dev_err(dev, "failed to initialize core\n");
-               goto err1;
+               goto err4;
        }
 
        /* Check the maximum_speed parameter */
@@ -1021,31 +1021,12 @@ static int dwc3_probe(struct platform_device *pdev)
                break;
        }
 
-       /* Adjust Frame Length */
-       dwc3_frame_length_adjustment(dwc, fladj);
-
-       usb_phy_set_suspend(dwc->usb2_phy, 0);
-       usb_phy_set_suspend(dwc->usb3_phy, 0);
-       ret = phy_power_on(dwc->usb2_generic_phy);
-       if (ret < 0)
-               goto err2;
-
-       ret = phy_power_on(dwc->usb3_generic_phy);
-       if (ret < 0)
-               goto err3;
-
-       ret = dwc3_event_buffers_setup(dwc);
-       if (ret) {
-               dev_err(dwc->dev, "failed to setup event buffers\n");
-               goto err4;
-       }
-
        ret = dwc3_core_init_mode(dwc);
        if (ret)
                goto err5;
 
        dwc3_debugfs_init(dwc);
-       pm_runtime_allow(dev);
+       pm_runtime_put(dev);
 
        return 0;
 
@@ -1053,19 +1034,18 @@ err5:
        dwc3_event_buffers_cleanup(dwc);
 
 err4:
-       phy_power_off(dwc->usb3_generic_phy);
+       dwc3_free_scratch_buffers(dwc);
 
 err3:
-       phy_power_off(dwc->usb2_generic_phy);
+       dwc3_free_event_buffers(dwc);
+       dwc3_ulpi_exit(dwc);
 
 err2:
-       usb_phy_set_suspend(dwc->usb2_phy, 1);
-       usb_phy_set_suspend(dwc->usb3_phy, 1);
-       dwc3_core_exit(dwc);
+       pm_runtime_allow(&pdev->dev);
 
 err1:
-       dwc3_free_event_buffers(dwc);
-       dwc3_ulpi_exit(dwc);
+       pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
 
 err0:
        /*
@@ -1083,6 +1063,7 @@ static int dwc3_remove(struct platform_device *pdev)
        struct dwc3     *dwc = platform_get_drvdata(pdev);
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
+       pm_runtime_get_sync(&pdev->dev);
        /*
         * restore res->start back to its original value so that, in case the
         * probe is deferred, we don't end up getting error in request the
@@ -1092,133 +1073,192 @@ static int dwc3_remove(struct platform_device *pdev)
 
        dwc3_debugfs_exit(dwc);
        dwc3_core_exit_mode(dwc);
-       dwc3_event_buffers_cleanup(dwc);
-       dwc3_free_event_buffers(dwc);
-
-       usb_phy_set_suspend(dwc->usb2_phy, 1);
-       usb_phy_set_suspend(dwc->usb3_phy, 1);
-       phy_power_off(dwc->usb2_generic_phy);
-       phy_power_off(dwc->usb3_generic_phy);
 
        dwc3_core_exit(dwc);
        dwc3_ulpi_exit(dwc);
 
        pm_runtime_put_sync(&pdev->dev);
+       pm_runtime_allow(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
+       dwc3_free_event_buffers(dwc);
+       dwc3_free_scratch_buffers(dwc);
+
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int dwc3_suspend_common(struct dwc3 *dwc)
 {
-       struct dwc3     *dwc = dev_get_drvdata(dev);
        unsigned long   flags;
 
-       spin_lock_irqsave(&dwc->lock, flags);
-
        switch (dwc->dr_mode) {
        case USB_DR_MODE_PERIPHERAL:
        case USB_DR_MODE_OTG:
+               spin_lock_irqsave(&dwc->lock, flags);
                dwc3_gadget_suspend(dwc);
-               /* FALLTHROUGH */
+               spin_unlock_irqrestore(&dwc->lock, flags);
+               break;
        case USB_DR_MODE_HOST:
        default:
-               dwc3_event_buffers_cleanup(dwc);
+               /* do nothing */
                break;
        }
 
-       dwc->gctl = dwc3_readl(dwc->regs, DWC3_GCTL);
-       spin_unlock_irqrestore(&dwc->lock, flags);
+       dwc3_core_exit(dwc);
 
-       usb_phy_shutdown(dwc->usb3_phy);
-       usb_phy_shutdown(dwc->usb2_phy);
-       phy_exit(dwc->usb2_generic_phy);
-       phy_exit(dwc->usb3_generic_phy);
+       return 0;
+}
 
-       usb_phy_set_suspend(dwc->usb2_phy, 1);
-       usb_phy_set_suspend(dwc->usb3_phy, 1);
-       WARN_ON(phy_power_off(dwc->usb2_generic_phy) < 0);
-       WARN_ON(phy_power_off(dwc->usb3_generic_phy) < 0);
+static int dwc3_resume_common(struct dwc3 *dwc)
+{
+       unsigned long   flags;
+       int             ret;
 
-       pinctrl_pm_select_sleep_state(dev);
+       ret = dwc3_core_init(dwc);
+       if (ret)
+               return ret;
+
+       switch (dwc->dr_mode) {
+       case USB_DR_MODE_PERIPHERAL:
+       case USB_DR_MODE_OTG:
+               spin_lock_irqsave(&dwc->lock, flags);
+               dwc3_gadget_resume(dwc);
+               spin_unlock_irqrestore(&dwc->lock, flags);
+               /* FALLTHROUGH */
+       case USB_DR_MODE_HOST:
+       default:
+               /* do nothing */
+               break;
+       }
 
        return 0;
 }
 
-static int dwc3_resume(struct device *dev)
+static int dwc3_runtime_checks(struct dwc3 *dwc)
 {
-       struct dwc3     *dwc = dev_get_drvdata(dev);
-       unsigned long   flags;
+       switch (dwc->dr_mode) {
+       case USB_DR_MODE_PERIPHERAL:
+       case USB_DR_MODE_OTG:
+               if (dwc->connected)
+                       return -EBUSY;
+               break;
+       case USB_DR_MODE_HOST:
+       default:
+               /* do nothing */
+               break;
+       }
+
+       return 0;
+}
+
+static int dwc3_runtime_suspend(struct device *dev)
+{
+       struct dwc3     *dwc = dev_get_drvdata(dev);
        int             ret;
 
-       pinctrl_pm_select_default_state(dev);
+       if (dwc3_runtime_checks(dwc))
+               return -EBUSY;
 
-       usb_phy_set_suspend(dwc->usb2_phy, 0);
-       usb_phy_set_suspend(dwc->usb3_phy, 0);
-       ret = phy_power_on(dwc->usb2_generic_phy);
-       if (ret < 0)
+       ret = dwc3_suspend_common(dwc);
+       if (ret)
                return ret;
 
-       ret = phy_power_on(dwc->usb3_generic_phy);
-       if (ret < 0)
-               goto err_usb2phy_power;
+       device_init_wakeup(dev, true);
 
-       usb_phy_init(dwc->usb3_phy);
-       usb_phy_init(dwc->usb2_phy);
-       ret = phy_init(dwc->usb2_generic_phy);
-       if (ret < 0)
-               goto err_usb3phy_power;
+       return 0;
+}
 
-       ret = phy_init(dwc->usb3_generic_phy);
-       if (ret < 0)
-               goto err_usb2phy_init;
+static int dwc3_runtime_resume(struct device *dev)
+{
+       struct dwc3     *dwc = dev_get_drvdata(dev);
+       int             ret;
 
-       spin_lock_irqsave(&dwc->lock, flags);
+       device_init_wakeup(dev, false);
 
-       dwc3_event_buffers_setup(dwc);
-       dwc3_writel(dwc->regs, DWC3_GCTL, dwc->gctl);
+       ret = dwc3_resume_common(dwc);
+       if (ret)
+               return ret;
 
        switch (dwc->dr_mode) {
        case USB_DR_MODE_PERIPHERAL:
        case USB_DR_MODE_OTG:
-               dwc3_gadget_resume(dwc);
-               /* FALLTHROUGH */
+               dwc3_gadget_process_pending_events(dwc);
+               break;
        case USB_DR_MODE_HOST:
        default:
                /* do nothing */
                break;
        }
 
-       spin_unlock_irqrestore(&dwc->lock, flags);
+       pm_runtime_mark_last_busy(dev);
 
-       pm_runtime_disable(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
+       return 0;
+}
+
+static int dwc3_runtime_idle(struct device *dev)
+{
+       struct dwc3     *dwc = dev_get_drvdata(dev);
+
+       switch (dwc->dr_mode) {
+       case USB_DR_MODE_PERIPHERAL:
+       case USB_DR_MODE_OTG:
+               if (dwc3_runtime_checks(dwc))
+                       return -EBUSY;
+               break;
+       case USB_DR_MODE_HOST:
+       default:
+               /* do nothing */
+               break;
+       }
+
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_autosuspend(dev);
 
        return 0;
+}
+#endif /* CONFIG_PM */
 
-err_usb2phy_init:
-       phy_exit(dwc->usb2_generic_phy);
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_suspend(struct device *dev)
+{
+       struct dwc3     *dwc = dev_get_drvdata(dev);
+       int             ret;
 
-err_usb3phy_power:
-       phy_power_off(dwc->usb3_generic_phy);
+       ret = dwc3_suspend_common(dwc);
+       if (ret)
+               return ret;
 
-err_usb2phy_power:
-       phy_power_off(dwc->usb2_generic_phy);
+       pinctrl_pm_select_sleep_state(dev);
 
-       return ret;
+       return 0;
 }
 
+static int dwc3_resume(struct device *dev)
+{
+       struct dwc3     *dwc = dev_get_drvdata(dev);
+       int             ret;
+
+       pinctrl_pm_select_default_state(dev);
+
+       ret = dwc3_resume_common(dwc);
+       if (ret)
+               return ret;
+
+       pm_runtime_disable(dev);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+
+       return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
 static const struct dev_pm_ops dwc3_dev_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume)
+       SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume,
+                       dwc3_runtime_idle)
 };
 
-#define DWC3_PM_OPS    &(dwc3_dev_pm_ops)
-#else
-#define DWC3_PM_OPS    NULL
-#endif
-
 #ifdef CONFIG_OF
 static const struct of_device_id of_dwc3_match[] = {
        {
@@ -1250,7 +1290,7 @@ static struct platform_driver dwc3_driver = {
                .name   = "dwc3",
                .of_match_table = of_match_ptr(of_dwc3_match),
                .acpi_match_table = ACPI_PTR(dwc3_acpi_match),
-               .pm     = DWC3_PM_OPS,
+               .pm     = &dwc3_dev_pm_ops,
        },
 };
 
index 654050684f4fadb4004587edcb653ca03c0d4f3f..45d6de5107c733ff1ba2d9df69e3147b45c9a493 100644 (file)
@@ -86,6 +86,7 @@
 #define DWC3_GCTL              0xc110
 #define DWC3_GEVTEN            0xc114
 #define DWC3_GSTS              0xc118
+#define DWC3_GUCTL1            0xc11c
 #define DWC3_GSNPSID           0xc120
 #define DWC3_GGPIO             0xc124
 #define DWC3_GUID              0xc128
 #define DWC3_DGCMDPAR          0xc710
 #define DWC3_DGCMD             0xc714
 #define DWC3_DALEPENA          0xc720
-#define DWC3_DEPCMDPAR2(n)     (0xc800 + (n * 0x10))
-#define DWC3_DEPCMDPAR1(n)     (0xc804 + (n * 0x10))
-#define DWC3_DEPCMDPAR0(n)     (0xc808 + (n * 0x10))
-#define DWC3_DEPCMD(n)         (0xc80c + (n * 0x10))
+
+#define DWC3_DEP_BASE(n)       (0xc800 + (n * 0x10))
+#define DWC3_DEPCMDPAR2                0x00
+#define DWC3_DEPCMDPAR1                0x04
+#define DWC3_DEPCMDPAR0                0x08
+#define DWC3_DEPCMD            0x0c
 
 /* OTG Registers */
 #define DWC3_OCFG              0xcc00
 #define DWC3_GEVNTSIZ_INTMASK          (1 << 31)
 #define DWC3_GEVNTSIZ_SIZE(n)          ((n) & 0xffff)
 
+/* Global HWPARAMS0 Register */
+#define DWC3_GHWPARAMS0_USB3_MODE(n)   ((n) & 0x3)
+#define DWC3_GHWPARAMS0_MBUS_TYPE(n)   (((n) >> 3) & 0x7)
+#define DWC3_GHWPARAMS0_SBUS_TYPE(n)   (((n) >> 6) & 0x3)
+#define DWC3_GHWPARAMS0_MDWIDTH(n)     (((n) >> 8) & 0xff)
+#define DWC3_GHWPARAMS0_SDWIDTH(n)     (((n) >> 16) & 0xff)
+#define DWC3_GHWPARAMS0_AWIDTH(n)      (((n) >> 24) & 0xff)
+
 /* Global HWPARAMS1 Register */
 #define DWC3_GHWPARAMS1_EN_PWROPT(n)   (((n) & (3 << 24)) >> 24)
 #define DWC3_GHWPARAMS1_EN_PWROPT_NO   0
 /* Global HWPARAMS6 Register */
 #define DWC3_GHWPARAMS6_EN_FPGA                        (1 << 7)
 
+/* Global HWPARAMS7 Register */
+#define DWC3_GHWPARAMS7_RAM1_DEPTH(n)  ((n) & 0xffff)
+#define DWC3_GHWPARAMS7_RAM2_DEPTH(n)  (((n) >> 16) & 0xffff)
+
 /* Global Frame Length Adjustment Register */
 #define DWC3_GFLADJ_30MHZ_SDBND_SEL            (1 << 7)
 #define DWC3_GFLADJ_30MHZ_MASK                 0x3f
@@ -468,6 +483,8 @@ struct dwc3_event_buffer {
  * @endpoint: usb endpoint
  * @pending_list: list of pending requests for this endpoint
  * @started_list: list of started requests on this endpoint
+ * @lock: spinlock for endpoint request queue traversal
+ * @regs: pointer to first endpoint register
  * @trb_pool: array of transaction buffers
  * @trb_pool_dma: dma address of @trb_pool
  * @trb_enqueue: enqueue 'pointer' into TRB array
@@ -480,6 +497,8 @@ struct dwc3_event_buffer {
  * @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
  * @resource_index: Resource transfer index
  * @interval: the interval on which the ISOC transfer is started
+ * @allocated_requests: number of requests allocated
+ * @queued_requests: number of requests queued for transfer
  * @name: a human readable name e.g. ep1out-bulk
  * @direction: true for TX, false for RX
  * @stream_capable: true when streams are enabled
@@ -489,6 +508,9 @@ struct dwc3_ep {
        struct list_head        pending_list;
        struct list_head        started_list;
 
+       spinlock_t              lock;
+       void __iomem            *regs;
+
        struct dwc3_trb         *trb_pool;
        dma_addr_t              trb_pool_dma;
        const struct usb_ss_ep_comp_descriptor *comp_desc;
@@ -521,6 +543,8 @@ struct dwc3_ep {
        u8                      number;
        u8                      type;
        u8                      resource_index;
+       u32                     allocated_requests;
+       u32                     queued_requests;
        u32                     interval;
 
        char                    name[20];
@@ -712,6 +736,8 @@ struct dwc3_scratchpad_array {
  * @gadget_driver: pointer to the gadget driver
  * @regs: base address for our registers
  * @regs_size: address space size
+ * @fladj: frame length adjustment
+ * @irq_gadget: peripheral controller's IRQ number
  * @nr_scratch: number of scratch buffers
  * @u1u2: only used on revisions <1.83a for workaround
  * @maximum_speed: maximum speed requested (mainly for testing purposes)
@@ -744,6 +770,7 @@ struct dwc3_scratchpad_array {
  * @lpm_nyet_threshold: LPM NYET response threshold
  * @hird_threshold: HIRD threshold
  * @hsphy_interface: "utmi" or "ulpi"
+ * @connected: true when we're connected to a host, false otherwise
  * @delayed_status: true when gadget driver asks for delayed status
  * @ep0_bounced: true when we used bounce buffer
  * @ep0_expect_in: true when we expect a DATA IN transfer
@@ -754,6 +781,7 @@ struct dwc3_scratchpad_array {
  *     0       - utmi_sleep_n
  *     1       - utmi_l1_suspend_n
  * @is_fpga: true when we are using the FPGA board
+ * @pending_events: true when we have pending IRQs to be handled
  * @pullups_connected: true when Run/Stop bit is set
  * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
  * @start_config_issued: true when StartConfig command has been issued
@@ -818,10 +846,8 @@ struct dwc3 {
 
        enum usb_dr_mode        dr_mode;
 
-       /* used for suspend/resume */
-       u32                     dcfg;
-       u32                     gctl;
-
+       u32                     fladj;
+       u32                     irq_gadget;
        u32                     nr_scratch;
        u32                     u1u2;
        u32                     maximum_speed;
@@ -860,7 +886,7 @@ struct dwc3 {
  * just so dwc31 revisions are always larger than dwc3.
  */
 #define DWC3_REVISION_IS_DWC31         0x80000000
-#define DWC3_USB31_REVISION_110A       (0x3131302a | DWC3_REVISION_IS_USB31)
+#define DWC3_USB31_REVISION_110A       (0x3131302a | DWC3_REVISION_IS_DWC31)
 
        enum dwc3_ep0_next      ep0_next_event;
        enum dwc3_ep0_state     ep0state;
@@ -890,6 +916,7 @@ struct dwc3 {
 
        const char              *hsphy_interface;
 
+       unsigned                connected:1;
        unsigned                delayed_status:1;
        unsigned                ep0_bounced:1;
        unsigned                ep0_expect_in:1;
@@ -897,6 +924,7 @@ struct dwc3 {
        unsigned                has_lpm_erratum:1;
        unsigned                is_utmi_l1_suspend:1;
        unsigned                is_fpga:1;
+       unsigned                pending_events:1;
        unsigned                pullups_connected:1;
        unsigned                setup_packet_pending:1;
        unsigned                three_stage_setup:1;
@@ -1094,8 +1122,8 @@ void dwc3_gadget_exit(struct dwc3 *dwc);
 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
 int dwc3_gadget_get_link_state(struct dwc3 *dwc);
 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
-int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
-               unsigned cmd, struct dwc3_gadget_ep_cmd_params *params);
+int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+               struct dwc3_gadget_ep_cmd_params *params);
 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
 #else
 static inline int dwc3_gadget_init(struct dwc3 *dwc)
@@ -1110,8 +1138,8 @@ static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc,
                enum dwc3_link_state state)
 { return 0; }
 
-static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
-               unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
+static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+               struct dwc3_gadget_ep_cmd_params *params)
 { return 0; }
 static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
                int cmd, u32 param)
@@ -1122,6 +1150,7 @@ static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
 #if !IS_ENABLED(CONFIG_USB_DWC3_HOST)
 int dwc3_gadget_suspend(struct dwc3 *dwc);
 int dwc3_gadget_resume(struct dwc3 *dwc);
+void dwc3_gadget_process_pending_events(struct dwc3 *dwc);
 #else
 static inline int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
@@ -1132,6 +1161,10 @@ static inline int dwc3_gadget_resume(struct dwc3 *dwc)
 {
        return 0;
 }
+
+static inline void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+{
+}
 #endif /* !IS_ENABLED(CONFIG_USB_DWC3_HOST) */
 
 #if IS_ENABLED(CONFIG_USB_DWC3_ULPI)
index 71e318025964ede1f2e48c910f9da630e5574886..22dfc3dd6a13037e81561bba94b7c65256787f7f 100644 (file)
@@ -128,56 +128,112 @@ dwc3_gadget_link_string(enum dwc3_link_state link_state)
  * dwc3_gadget_event_string - returns event name
  * @event: the event code
  */
-static inline const char *dwc3_gadget_event_string(u8 event)
+static inline const char *
+dwc3_gadget_event_string(const struct dwc3_event_devt *event)
 {
-       switch (event) {
+       static char str[256];
+       enum dwc3_link_state state = event->event_info & DWC3_LINK_STATE_MASK;
+
+       switch (event->type) {
        case DWC3_DEVICE_EVENT_DISCONNECT:
-               return "Disconnect";
+               sprintf(str, "Disconnect [%s]",
+                               dwc3_gadget_link_string(state));
+               break;
        case DWC3_DEVICE_EVENT_RESET:
-               return "Reset";
+               sprintf(str, "Reset [%s]", dwc3_gadget_link_string(state));
+               break;
        case DWC3_DEVICE_EVENT_CONNECT_DONE:
-               return "Connection Done";
+               sprintf(str, "Connection Done [%s]",
+                               dwc3_gadget_link_string(state));
+               break;
        case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
-               return "Link Status Change";
+               sprintf(str, "Link Change [%s]",
+                               dwc3_gadget_link_string(state));
+               break;
        case DWC3_DEVICE_EVENT_WAKEUP:
-               return "WakeUp";
+               sprintf(str, "WakeUp [%s]", dwc3_gadget_link_string(state));
+               break;
        case DWC3_DEVICE_EVENT_EOPF:
-               return "End-Of-Frame";
+               sprintf(str, "End-Of-Frame [%s]",
+                               dwc3_gadget_link_string(state));
+               break;
        case DWC3_DEVICE_EVENT_SOF:
-               return "Start-Of-Frame";
+               sprintf(str, "Start-Of-Frame [%s]",
+                               dwc3_gadget_link_string(state));
+               break;
        case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
-               return "Erratic Error";
+               sprintf(str, "Erratic Error [%s]",
+                               dwc3_gadget_link_string(state));
+               break;
        case DWC3_DEVICE_EVENT_CMD_CMPL:
-               return "Command Complete";
+               sprintf(str, "Command Complete [%s]",
+                               dwc3_gadget_link_string(state));
+               break;
        case DWC3_DEVICE_EVENT_OVERFLOW:
-               return "Overflow";
+               sprintf(str, "Overflow [%s]", dwc3_gadget_link_string(state));
+               break;
+       default:
+               sprintf(str, "UNKNOWN");
        }
 
-       return "UNKNOWN";
+       return str;
 }
 
 /**
  * dwc3_ep_event_string - returns event name
  * @event: then event code
  */
-static inline const char *dwc3_ep_event_string(u8 event)
+static inline const char *
+dwc3_ep_event_string(const struct dwc3_event_depevt *event)
 {
-       switch (event) {
+       u8 epnum = event->endpoint_number;
+       static char str[256];
+       int status;
+       int ret;
+
+       ret = sprintf(str, "ep%d%s: ", epnum >> 1,
+                       (epnum & 1) ? "in" : "out");
+       if (ret < 0)
+               return "UNKNOWN";
+
+       switch (event->endpoint_event) {
        case DWC3_DEPEVT_XFERCOMPLETE:
-               return "Transfer Complete";
+               strcat(str, "Transfer Complete");
+               break;
        case DWC3_DEPEVT_XFERINPROGRESS:
-               return "Transfer In-Progress";
+               strcat(str, "Transfer In-Progress");
+               break;
        case DWC3_DEPEVT_XFERNOTREADY:
-               return "Transfer Not Ready";
+               strcat(str, "Transfer Not Ready");
+               status = event->status & DEPEVT_STATUS_TRANSFER_ACTIVE;
+               strcat(str, status ? " (Active)" : " (Not Active)");
+               break;
        case DWC3_DEPEVT_RXTXFIFOEVT:
-               return "FIFO";
+               strcat(str, "FIFO");
+               break;
        case DWC3_DEPEVT_STREAMEVT:
-               return "Stream";
+               status = event->status;
+
+               switch (status) {
+               case DEPEVT_STREAMEVT_FOUND:
+                       sprintf(str + ret, " Stream %d Found",
+                                       event->parameters);
+                       break;
+               case DEPEVT_STREAMEVT_NOTFOUND:
+               default:
+                       strcat(str, " Stream Not Found");
+                       break;
+               }
+
+               break;
        case DWC3_DEPEVT_EPCMDCMPLT:
-               return "Endpoint Command Complete";
+               strcat(str, "Endpoint Command Complete");
+               break;
+       default:
+               sprintf(str, "UNKNOWN");
        }
 
-       return "UNKNOWN";
+       return str;
 }
 
 /**
@@ -214,6 +270,46 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
        }
 }
 
+static inline const char *dwc3_decode_event(u32 event)
+{
+       const union dwc3_event evt = (union dwc3_event) event;
+
+       if (evt.type.is_devspec)
+               return dwc3_gadget_event_string(&evt.devt);
+       else
+               return dwc3_ep_event_string(&evt.depevt);
+}
+
+static inline const char *dwc3_ep_cmd_status_string(int status)
+{
+       switch (status) {
+       case -ETIMEDOUT:
+               return "Timed Out";
+       case 0:
+               return "Successful";
+       case DEPEVT_TRANSFER_NO_RESOURCE:
+               return "No Resource";
+       case DEPEVT_TRANSFER_BUS_EXPIRY:
+               return "Bus Expiry";
+       default:
+               return "UNKNOWN";
+       }
+}
+
+static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
+{
+       switch (status) {
+       case -ETIMEDOUT:
+               return "Timed Out";
+       case 0:
+               return "Successful";
+       case 1:
+               return "Error";
+       default:
+               return "UNKNOWN";
+       }
+}
+
 void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
 
 #ifdef CONFIG_DEBUG_FS
index b1dd3c6d7ef7f5ae2e94f5da4f3dd91e65f1810e..31926dda43c9638f96a3fa6a51a2f5ef6a6d2359 100644 (file)
 #define dump_register(nm)                              \
 {                                                      \
        .name   = __stringify(nm),                      \
-       .offset = DWC3_ ##nm - DWC3_GLOBALS_REGS_START, \
+       .offset = DWC3_ ##nm,                           \
 }
 
+#define dump_ep_register_set(n)                        \
+       {                                       \
+               .name = "DEPCMDPAR2("__stringify(n)")", \
+               .offset = DWC3_DEP_BASE(n) +    \
+                       DWC3_DEPCMDPAR2,        \
+       },                                      \
+       {                                       \
+               .name = "DEPCMDPAR1("__stringify(n)")", \
+               .offset = DWC3_DEP_BASE(n) +    \
+                       DWC3_DEPCMDPAR1,        \
+       },                                      \
+       {                                       \
+               .name = "DEPCMDPAR0("__stringify(n)")", \
+               .offset = DWC3_DEP_BASE(n) +    \
+                       DWC3_DEPCMDPAR0,        \
+       },                                      \
+       {                                       \
+               .name = "DEPCMD("__stringify(n)")",     \
+               .offset = DWC3_DEP_BASE(n) +    \
+                       DWC3_DEPCMD,            \
+       }
+
+
 static const struct debugfs_reg32 dwc3_regs[] = {
        dump_register(GSBUSCFG0),
        dump_register(GSBUSCFG1),
@@ -47,6 +70,7 @@ static const struct debugfs_reg32 dwc3_regs[] = {
        dump_register(GCTL),
        dump_register(GEVTEN),
        dump_register(GSTS),
+       dump_register(GUCTL1),
        dump_register(GSNPSID),
        dump_register(GGPIO),
        dump_register(GUID),
@@ -218,137 +242,38 @@ static const struct debugfs_reg32 dwc3_regs[] = {
        dump_register(DGCMD),
        dump_register(DALEPENA),
 
-       dump_register(DEPCMDPAR2(0)),
-       dump_register(DEPCMDPAR2(1)),
-       dump_register(DEPCMDPAR2(2)),
-       dump_register(DEPCMDPAR2(3)),
-       dump_register(DEPCMDPAR2(4)),
-       dump_register(DEPCMDPAR2(5)),
-       dump_register(DEPCMDPAR2(6)),
-       dump_register(DEPCMDPAR2(7)),
-       dump_register(DEPCMDPAR2(8)),
-       dump_register(DEPCMDPAR2(9)),
-       dump_register(DEPCMDPAR2(10)),
-       dump_register(DEPCMDPAR2(11)),
-       dump_register(DEPCMDPAR2(12)),
-       dump_register(DEPCMDPAR2(13)),
-       dump_register(DEPCMDPAR2(14)),
-       dump_register(DEPCMDPAR2(15)),
-       dump_register(DEPCMDPAR2(16)),
-       dump_register(DEPCMDPAR2(17)),
-       dump_register(DEPCMDPAR2(18)),
-       dump_register(DEPCMDPAR2(19)),
-       dump_register(DEPCMDPAR2(20)),
-       dump_register(DEPCMDPAR2(21)),
-       dump_register(DEPCMDPAR2(22)),
-       dump_register(DEPCMDPAR2(23)),
-       dump_register(DEPCMDPAR2(24)),
-       dump_register(DEPCMDPAR2(25)),
-       dump_register(DEPCMDPAR2(26)),
-       dump_register(DEPCMDPAR2(27)),
-       dump_register(DEPCMDPAR2(28)),
-       dump_register(DEPCMDPAR2(29)),
-       dump_register(DEPCMDPAR2(30)),
-       dump_register(DEPCMDPAR2(31)),
-
-       dump_register(DEPCMDPAR1(0)),
-       dump_register(DEPCMDPAR1(1)),
-       dump_register(DEPCMDPAR1(2)),
-       dump_register(DEPCMDPAR1(3)),
-       dump_register(DEPCMDPAR1(4)),
-       dump_register(DEPCMDPAR1(5)),
-       dump_register(DEPCMDPAR1(6)),
-       dump_register(DEPCMDPAR1(7)),
-       dump_register(DEPCMDPAR1(8)),
-       dump_register(DEPCMDPAR1(9)),
-       dump_register(DEPCMDPAR1(10)),
-       dump_register(DEPCMDPAR1(11)),
-       dump_register(DEPCMDPAR1(12)),
-       dump_register(DEPCMDPAR1(13)),
-       dump_register(DEPCMDPAR1(14)),
-       dump_register(DEPCMDPAR1(15)),
-       dump_register(DEPCMDPAR1(16)),
-       dump_register(DEPCMDPAR1(17)),
-       dump_register(DEPCMDPAR1(18)),
-       dump_register(DEPCMDPAR1(19)),
-       dump_register(DEPCMDPAR1(20)),
-       dump_register(DEPCMDPAR1(21)),
-       dump_register(DEPCMDPAR1(22)),
-       dump_register(DEPCMDPAR1(23)),
-       dump_register(DEPCMDPAR1(24)),
-       dump_register(DEPCMDPAR1(25)),
-       dump_register(DEPCMDPAR1(26)),
-       dump_register(DEPCMDPAR1(27)),
-       dump_register(DEPCMDPAR1(28)),
-       dump_register(DEPCMDPAR1(29)),
-       dump_register(DEPCMDPAR1(30)),
-       dump_register(DEPCMDPAR1(31)),
-
-       dump_register(DEPCMDPAR0(0)),
-       dump_register(DEPCMDPAR0(1)),
-       dump_register(DEPCMDPAR0(2)),
-       dump_register(DEPCMDPAR0(3)),
-       dump_register(DEPCMDPAR0(4)),
-       dump_register(DEPCMDPAR0(5)),
-       dump_register(DEPCMDPAR0(6)),
-       dump_register(DEPCMDPAR0(7)),
-       dump_register(DEPCMDPAR0(8)),
-       dump_register(DEPCMDPAR0(9)),
-       dump_register(DEPCMDPAR0(10)),
-       dump_register(DEPCMDPAR0(11)),
-       dump_register(DEPCMDPAR0(12)),
-       dump_register(DEPCMDPAR0(13)),
-       dump_register(DEPCMDPAR0(14)),
-       dump_register(DEPCMDPAR0(15)),
-       dump_register(DEPCMDPAR0(16)),
-       dump_register(DEPCMDPAR0(17)),
-       dump_register(DEPCMDPAR0(18)),
-       dump_register(DEPCMDPAR0(19)),
-       dump_register(DEPCMDPAR0(20)),
-       dump_register(DEPCMDPAR0(21)),
-       dump_register(DEPCMDPAR0(22)),
-       dump_register(DEPCMDPAR0(23)),
-       dump_register(DEPCMDPAR0(24)),
-       dump_register(DEPCMDPAR0(25)),
-       dump_register(DEPCMDPAR0(26)),
-       dump_register(DEPCMDPAR0(27)),
-       dump_register(DEPCMDPAR0(28)),
-       dump_register(DEPCMDPAR0(29)),
-       dump_register(DEPCMDPAR0(30)),
-       dump_register(DEPCMDPAR0(31)),
-
-       dump_register(DEPCMD(0)),
-       dump_register(DEPCMD(1)),
-       dump_register(DEPCMD(2)),
-       dump_register(DEPCMD(3)),
-       dump_register(DEPCMD(4)),
-       dump_register(DEPCMD(5)),
-       dump_register(DEPCMD(6)),
-       dump_register(DEPCMD(7)),
-       dump_register(DEPCMD(8)),
-       dump_register(DEPCMD(9)),
-       dump_register(DEPCMD(10)),
-       dump_register(DEPCMD(11)),
-       dump_register(DEPCMD(12)),
-       dump_register(DEPCMD(13)),
-       dump_register(DEPCMD(14)),
-       dump_register(DEPCMD(15)),
-       dump_register(DEPCMD(16)),
-       dump_register(DEPCMD(17)),
-       dump_register(DEPCMD(18)),
-       dump_register(DEPCMD(19)),
-       dump_register(DEPCMD(20)),
-       dump_register(DEPCMD(21)),
-       dump_register(DEPCMD(22)),
-       dump_register(DEPCMD(23)),
-       dump_register(DEPCMD(24)),
-       dump_register(DEPCMD(25)),
-       dump_register(DEPCMD(26)),
-       dump_register(DEPCMD(27)),
-       dump_register(DEPCMD(28)),
-       dump_register(DEPCMD(29)),
-       dump_register(DEPCMD(30)),
-       dump_register(DEPCMD(31)),
+       dump_ep_register_set(0),
+       dump_ep_register_set(1),
+       dump_ep_register_set(2),
+       dump_ep_register_set(3),
+       dump_ep_register_set(4),
+       dump_ep_register_set(5),
+       dump_ep_register_set(6),
+       dump_ep_register_set(7),
+       dump_ep_register_set(8),
+       dump_ep_register_set(9),
+       dump_ep_register_set(10),
+       dump_ep_register_set(11),
+       dump_ep_register_set(12),
+       dump_ep_register_set(13),
+       dump_ep_register_set(14),
+       dump_ep_register_set(15),
+       dump_ep_register_set(16),
+       dump_ep_register_set(17),
+       dump_ep_register_set(18),
+       dump_ep_register_set(19),
+       dump_ep_register_set(20),
+       dump_ep_register_set(21),
+       dump_ep_register_set(22),
+       dump_ep_register_set(23),
+       dump_ep_register_set(24),
+       dump_ep_register_set(25),
+       dump_ep_register_set(26),
+       dump_ep_register_set(27),
+       dump_ep_register_set(28),
+       dump_ep_register_set(29),
+       dump_ep_register_set(30),
+       dump_ep_register_set(31),
 
        dump_register(OCFG),
        dump_register(OCTL),
@@ -939,7 +864,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
 
        dwc->regset->regs = dwc3_regs;
        dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
-       dwc->regset->base = dwc->regs;
+       dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
 
        file = debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
        if (!file)
index af264493bbaeb972bb82bc401c864ac764522ad4..29e80cc9b634aab72f59e73c71667dfef9f86a45 100644 (file)
@@ -165,7 +165,7 @@ static void dwc3_omap_write_utmi_ctrl(struct dwc3_omap *omap, u32 value)
 
 static u32 dwc3_omap_read_irq0_status(struct dwc3_omap *omap)
 {
-       return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_0 -
+       return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_RAW_0 -
                                                omap->irq0_offset);
 }
 
@@ -178,7 +178,7 @@ static void dwc3_omap_write_irq0_status(struct dwc3_omap *omap, u32 value)
 
 static u32 dwc3_omap_read_irqmisc_status(struct dwc3_omap *omap)
 {
-       return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_MISC +
+       return dwc3_omap_readl(omap->base, USBOTGSS_IRQSTATUS_RAW_MISC +
                                                omap->irqmisc_offset);
 }
 
@@ -231,35 +231,30 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
                }
 
                val = dwc3_omap_read_utmi_ctrl(omap);
-               val &= ~(USBOTGSS_UTMI_OTG_CTRL_IDDIG
-                               | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
-                               | USBOTGSS_UTMI_OTG_CTRL_SESSEND);
-               val |= USBOTGSS_UTMI_OTG_CTRL_SESSVALID
-                               | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
+               val &= ~USBOTGSS_UTMI_OTG_CTRL_IDDIG;
                dwc3_omap_write_utmi_ctrl(omap, val);
                break;
 
        case OMAP_DWC3_VBUS_VALID:
                val = dwc3_omap_read_utmi_ctrl(omap);
                val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
-               val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG
-                               | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
-                               | USBOTGSS_UTMI_OTG_CTRL_SESSVALID
-                               | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
+               val |= USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
+                               | USBOTGSS_UTMI_OTG_CTRL_SESSVALID;
                dwc3_omap_write_utmi_ctrl(omap, val);
                break;
 
        case OMAP_DWC3_ID_FLOAT:
                if (omap->vbus_reg)
                        regulator_disable(omap->vbus_reg);
+               val = dwc3_omap_read_utmi_ctrl(omap);
+               val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+               dwc3_omap_write_utmi_ctrl(omap, val);
+               /* fall through: ID float also implies VBUS off */
 
        case OMAP_DWC3_VBUS_OFF:
                val = dwc3_omap_read_utmi_ctrl(omap);
                val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
-                               | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
-                               | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT);
-               val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND
-                               | USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+                               | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID);
+               val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND;
                dwc3_omap_write_utmi_ctrl(omap, val);
                break;
 
@@ -268,19 +263,38 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
        }
 }
 
+static void dwc3_omap_enable_irqs(struct dwc3_omap *omap);
+static void dwc3_omap_disable_irqs(struct dwc3_omap *omap);
+
 static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
+{
+       struct dwc3_omap        *omap = _omap;
+
+       if (dwc3_omap_read_irqmisc_status(omap) ||
+           dwc3_omap_read_irq0_status(omap)) {
+               /* mask irqs */
+               dwc3_omap_disable_irqs(omap);
+               return IRQ_WAKE_THREAD;
+       }
+
+       return IRQ_NONE;
+}
+
+static irqreturn_t dwc3_omap_interrupt_thread(int irq, void *_omap)
 {
        struct dwc3_omap        *omap = _omap;
        u32                     reg;
 
+       /* clear irq status flags */
        reg = dwc3_omap_read_irqmisc_status(omap);
-
        dwc3_omap_write_irqmisc_status(omap, reg);
 
        reg = dwc3_omap_read_irq0_status(omap);
-
        dwc3_omap_write_irq0_status(omap, reg);
 
+       /* unmask irqs */
+       dwc3_omap_enable_irqs(omap);
+
        return IRQ_HANDLED;
 }
 
@@ -497,8 +511,9 @@ static int dwc3_omap_probe(struct platform_device *pdev)
        /* check the DMA Status */
        reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
 
-       ret = devm_request_irq(dev, omap->irq, dwc3_omap_interrupt, 0,
-                       "dwc3-omap", omap);
+       ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
+                                       dwc3_omap_interrupt_thread, IRQF_SHARED,
+                                       "dwc3-omap", omap);
        if (ret) {
                dev_err(dev, "failed to request IRQ #%d --> %d\n",
                                omap->irq, ret);
index 14196cd416b3dda692fa12e70d6ce627074f7d3d..45f5a232d9fb658b76845e19bb3ef4fa0a0aabf6 100644 (file)
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 #include <linux/platform_device.h>
 #include <linux/gpio/consumer.h>
 #include <linux/acpi.h>
-
-#include "platform_data.h"
+#include <linux/delay.h>
 
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3                0xabcd
 #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI    0xabce
@@ -51,62 +51,70 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
 {
        if (pdev->vendor == PCI_VENDOR_ID_AMD &&
            pdev->device == PCI_DEVICE_ID_AMD_NL_USB) {
-               struct dwc3_platform_data pdata;
-
-               memset(&pdata, 0, sizeof(pdata));
-
-               pdata.has_lpm_erratum = true;
-               pdata.lpm_nyet_threshold = 0xf;
-
-               pdata.u2exit_lfps_quirk = true;
-               pdata.u2ss_inp3_quirk = true;
-               pdata.req_p1p2p3_quirk = true;
-               pdata.del_p1p2p3_quirk = true;
-               pdata.del_phy_power_chg_quirk = true;
-               pdata.lfps_filter_quirk = true;
-               pdata.rx_detect_poll_quirk = true;
-
-               pdata.tx_de_emphasis_quirk = true;
-               pdata.tx_de_emphasis = 1;
-
-               /*
-                * FIXME these quirks should be removed when AMD NL
-                * taps out
-                */
-               pdata.disable_scramble_quirk = true;
-               pdata.dis_u3_susphy_quirk = true;
-               pdata.dis_u2_susphy_quirk = true;
-
-               return platform_device_add_data(dwc3, &pdata, sizeof(pdata));
+               struct property_entry properties[] = {
+                       PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
+                       PROPERTY_ENTRY_U8("snps,lpm-nyet-threshold", 0xf),
+                       PROPERTY_ENTRY_BOOL("snps,u2exit_lfps_quirk"),
+                       PROPERTY_ENTRY_BOOL("snps,u2ss_inp3_quirk"),
+                       PROPERTY_ENTRY_BOOL("snps,req_p1p2p3_quirk"),
+                       PROPERTY_ENTRY_BOOL("snps,del_p1p2p3_quirk"),
+                       PROPERTY_ENTRY_BOOL("snps,del_phy_power_chg_quirk"),
+                       PROPERTY_ENTRY_BOOL("snps,lfps_filter_quirk"),
+                       PROPERTY_ENTRY_BOOL("snps,rx_detect_poll_quirk"),
+                       PROPERTY_ENTRY_BOOL("snps,tx_de_emphasis_quirk"),
+                       PROPERTY_ENTRY_U8("snps,tx_de_emphasis", 1),
+                       /*
+                        * FIXME these quirks should be removed when AMD NL
+                        * tapes out
+                        */
+                       PROPERTY_ENTRY_BOOL("snps,disable_scramble_quirk"),
+                       PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
+                       PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+                       { },
+               };
+
+               return platform_device_add_properties(dwc3, properties);
        }
 
-       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-           pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
-               struct gpio_desc *gpio;
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
+               int ret;
 
-               acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
-                                         acpi_dwc3_byt_gpios);
+               struct property_entry properties[] = {
+                       PROPERTY_ENTRY_STRING("dr-mode", "peripheral"),
+                       { }
+               };
 
-               /*
-                * These GPIOs will turn on the USB2 PHY. Note that we have to
-                * put the gpio descriptors again here because the phy driver
-                * might want to grab them, too.
-                */
-               gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
-               if (IS_ERR(gpio))
-                       return PTR_ERR(gpio);
+               ret = platform_device_add_properties(dwc3, properties);
+               if (ret < 0)
+                       return ret;
 
-               gpiod_set_value_cansleep(gpio, 1);
-               gpiod_put(gpio);
+               if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) {
+                       struct gpio_desc *gpio;
 
-               gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
-               if (IS_ERR(gpio))
-                       return PTR_ERR(gpio);
+                       acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
+                                       acpi_dwc3_byt_gpios);
+
+                       /*
+                        * These GPIOs will turn on the USB2 PHY. Note that we have to
+                        * put the gpio descriptors again here because the phy driver
+                        * might want to grab them, too.
+                        */
+                       gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
+                       if (IS_ERR(gpio))
+                               return PTR_ERR(gpio);
 
-               if (gpio) {
                        gpiod_set_value_cansleep(gpio, 1);
                        gpiod_put(gpio);
-                       usleep_range(10000, 11000);
+
+                       gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+                       if (IS_ERR(gpio))
+                               return PTR_ERR(gpio);
+
+                       if (gpio) {
+                               gpiod_set_value_cansleep(gpio, 1);
+                               gpiod_put(gpio);
+                               usleep_range(10000, 11000);
+                       }
                }
        }
 
@@ -114,15 +122,14 @@ static int dwc3_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc3)
            (pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 ||
             pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI ||
             pdev->device == PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31)) {
-
-               struct dwc3_platform_data pdata;
-
-               memset(&pdata, 0, sizeof(pdata));
-               pdata.usb3_lpm_capable = true;
-               pdata.has_lpm_erratum = true;
-               pdata.dis_enblslpm_quirk = true;
-
-               return platform_device_add_data(dwc3, &pdata, sizeof(pdata));
+               struct property_entry properties[] = {
+                       PROPERTY_ENTRY_BOOL("snps,usb3_lpm_capable"),
+                       PROPERTY_ENTRY_BOOL("snps,has-lpm-erratum"),
+                       PROPERTY_ENTRY_BOOL("snps,dis_enblslpm_quirk"),
+                       { },
+               };
+
+               return platform_device_add_properties(dwc3, properties);
        }
 
        return 0;
@@ -180,7 +187,11 @@ static int dwc3_pci_probe(struct pci_dev *pci,
                goto err;
        }
 
+       device_init_wakeup(dev, true);
+       device_set_run_wake(dev, true);
        pci_set_drvdata(pci, dwc3);
+       pm_runtime_put(dev);
+
        return 0;
 err:
        platform_device_put(dwc3);
@@ -189,6 +200,8 @@ err:
 
 static void dwc3_pci_remove(struct pci_dev *pci)
 {
+       device_init_wakeup(&pci->dev, false);
+       pm_runtime_get(&pci->dev);
        acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pci->dev));
        platform_device_unregister(pci_get_drvdata(pci));
 }
@@ -219,11 +232,43 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
 };
 MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
 
+#ifdef CONFIG_PM
+static int dwc3_pci_runtime_suspend(struct device *dev)
+{
+       if (device_run_wake(dev))
+               return 0;
+
+       return -EBUSY;
+}
+
+static int dwc3_pci_pm_dummy(struct device *dev)
+{
+       /*
+        * There's nothing to do here. No, seriously. Everything is taken
+        * care of either by the PCI subsystem or dwc3/core.c, so we have nothing
+        * missing here.
+        *
+        * So you'd think we didn't need this at all, but PCI subsystem will
+        * bail out if we don't have a valid callback :-s
+        */
+       return 0;
+}
+#endif /* CONFIG_PM */
+
+static struct dev_pm_ops dwc3_pci_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_pm_dummy, dwc3_pci_pm_dummy)
+       SET_RUNTIME_PM_OPS(dwc3_pci_runtime_suspend, dwc3_pci_pm_dummy,
+               NULL)
+};
+
 static struct pci_driver dwc3_pci_driver = {
        .name           = "dwc3-pci",
        .id_table       = dwc3_pci_id_table,
        .probe          = dwc3_pci_probe,
        .remove         = dwc3_pci_remove,
+       .driver         = {
+               .pm     = &dwc3_pci_dev_pm_ops,
+       }
 };
 
 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
index 51b52a79dfecec3c06ff5849b5b761784c7b8a5d..fe79d771dee4c4c936ae72756020e047e5500db4 100644 (file)
@@ -98,8 +98,7 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
 
        trace_dwc3_prepare_trb(dep, trb);
 
-       ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
-                       DWC3_DEPCMD_STARTTRANSFER, &params);
+       ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_STARTTRANSFER, &params);
        if (ret < 0) {
                dwc3_trace(trace_dwc3_ep0, "%s STARTTRANSFER failed",
                                dep->name);
@@ -107,9 +106,7 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
        }
 
        dep->flags |= DWC3_EP_BUSY;
-       dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
-                       dep->number);
-
+       dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
        dwc->ep0_next_event = DWC3_EP0_COMPLETE;
 
        return 0;
@@ -499,7 +496,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
        case USB_RECIP_ENDPOINT:
                switch (wValue) {
                case USB_ENDPOINT_HALT:
-                       dep = dwc3_wIndex_to_dep(dwc, wIndex);
+                       dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
                        if (!dep)
                                return -EINVAL;
                        if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
@@ -622,8 +619,8 @@ static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req)
        struct timing {
                u8      u1sel;
                u8      u1pel;
-               u16     u2sel;
-               u16     u2pel;
+               __le16  u2sel;
+               __le16  u2pel;
        } __packed timing;
 
        int             ret;
@@ -980,7 +977,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
                ret = usb_gadget_map_request(&dwc->gadget, &req->request,
                                dep->number);
                if (ret) {
-                       dwc3_trace(trace_dwc3_ep0, "failed to map request\n");
+                       dwc3_trace(trace_dwc3_ep0, "failed to map request");
                        return;
                }
 
@@ -1008,7 +1005,7 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
                ret = usb_gadget_map_request(&dwc->gadget, &req->request,
                                dep->number);
                if (ret) {
-                       dwc3_trace(trace_dwc3_ep0, "failed to map request\n");
+                       dwc3_trace(trace_dwc3_ep0, "failed to map request");
                        return;
                }
 
@@ -1058,7 +1055,7 @@ static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep)
        cmd |= DWC3_DEPCMD_CMDIOC;
        cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
        memset(&params, 0, sizeof(params));
-       ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+       ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
        WARN_ON_ONCE(ret);
        dep->resource_index = 0;
 }
@@ -1112,11 +1109,8 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
 void dwc3_ep0_interrupt(struct dwc3 *dwc,
                const struct dwc3_event_depevt *event)
 {
-       u8                      epnum = event->endpoint_number;
-
-       dwc3_trace(trace_dwc3_ep0, "%s while ep%d%s in state '%s'",
-                       dwc3_ep_event_string(event->endpoint_event),
-                       epnum >> 1, (epnum & 1) ? "in" : "out",
+       dwc3_trace(trace_dwc3_ep0, "%s: state '%s'",
+                       dwc3_ep_event_string(event),
                        dwc3_ep0_state_string(dwc->ep0state));
 
        switch (event->endpoint_event) {
index 07248ff1be5c150b04c2d0f6297290489c94c205..8f8c2157910e6b848561f75f192b7a45dd4225d1 100644 (file)
@@ -145,21 +145,29 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
        return -ETIMEDOUT;
 }
 
-static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
+/**
+ * dwc3_ep_inc_trb() - Increment a TRB index.
+ * @index - Pointer to the TRB index to increment.
+ *
+ * The index should never point to the link TRB. After incrementing,
+ * if it points to the link TRB, wrap around to the beginning. The
+ * link TRB is always at the last TRB entry.
+ */
+static void dwc3_ep_inc_trb(u8 *index)
 {
-       dep->trb_enqueue++;
-       dep->trb_enqueue %= DWC3_TRB_NUM;
+       (*index)++;
+       if (*index == (DWC3_TRB_NUM - 1))
+               *index = 0;
 }
 
-static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
+static void dwc3_ep_inc_enq(struct dwc3_ep *dep)
 {
-       dep->trb_dequeue++;
-       dep->trb_dequeue %= DWC3_TRB_NUM;
+       dwc3_ep_inc_trb(&dep->trb_enqueue);
 }
 
-static int dwc3_ep_is_last_trb(unsigned int index)
+static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
 {
-       return index == DWC3_TRB_NUM - 1;
+       dwc3_ep_inc_trb(&dep->trb_dequeue);
 }
 
 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
@@ -172,13 +180,6 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
                i = 0;
                do {
                        dwc3_ep_inc_deq(dep);
-                       /*
-                        * Skip LINK TRB. We can't use req->trb and check for
-                        * DWC3_TRBCTL_LINK_TRB because it points the TRB we
-                        * just completed (not the LINK TRB).
-                        */
-                       if (dwc3_ep_is_last_trb(dep->trb_dequeue))
-                               dwc3_ep_inc_deq(dep);
                } while(++i < req->request.num_mapped_sgs);
                req->started = false;
        }
@@ -199,57 +200,54 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
        spin_unlock(&dwc->lock);
        usb_gadget_giveback_request(&dep->endpoint, &req->request);
        spin_lock(&dwc->lock);
+
+       if (dep->number > 1)
+               pm_runtime_put(dwc->dev);
 }
 
 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
 {
        u32             timeout = 500;
+       int             status = 0;
+       int             ret = 0;
        u32             reg;
 
-       trace_dwc3_gadget_generic_cmd(cmd, param);
-
        dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
        dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);
 
        do {
                reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
                if (!(reg & DWC3_DGCMD_CMDACT)) {
-                       dwc3_trace(trace_dwc3_gadget,
-                                       "Command Complete --> %d",
-                                       DWC3_DGCMD_STATUS(reg));
-                       if (DWC3_DGCMD_STATUS(reg))
-                               return -EINVAL;
-                       return 0;
+                       status = DWC3_DGCMD_STATUS(reg);
+                       if (status)
+                               ret = -EINVAL;
+                       break;
                }
+       } while (timeout--);
 
-               /*
-                * We can't sleep here, because it's also called from
-                * interrupt context.
-                */
-               timeout--;
-               if (!timeout) {
-                       dwc3_trace(trace_dwc3_gadget,
-                                       "Command Timed Out");
-                       return -ETIMEDOUT;
-               }
-               udelay(1);
-       } while (1);
+       if (!timeout) {
+               ret = -ETIMEDOUT;
+               status = -ETIMEDOUT;
+       }
+
+       trace_dwc3_gadget_generic_cmd(cmd, param, status);
+
+       return ret;
 }
 
 static int __dwc3_gadget_wakeup(struct dwc3 *dwc);
 
-int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
-               unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
+int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+               struct dwc3_gadget_ep_cmd_params *params)
 {
-       struct dwc3_ep          *dep = dwc->eps[ep];
+       struct dwc3             *dwc = dep->dwc;
        u32                     timeout = 500;
        u32                     reg;
 
+       int                     cmd_status = 0;
        int                     susphy = false;
        int                     ret = -EINVAL;
 
-       trace_dwc3_gadget_ep_cmd(dep, cmd, params);
-
        /*
         * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
         * we're issuing an endpoint command, we must check if
@@ -258,11 +256,13 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
         * We will also set SUSPHY bit to what it was before returning as stated
         * by the same section on Synopsys databook.
         */
-       reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-       if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
-               susphy = true;
-               reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
-               dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+       if (dwc->gadget.speed <= USB_SPEED_HIGH) {
+               reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+               if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
+                       susphy = true;
+                       reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+                       dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+               }
        }
 
        if (cmd == DWC3_DEPCMD_STARTTRANSFER) {
@@ -279,26 +279,21 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
                }
        }
 
-       dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
-       dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
-       dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);
+       dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
+       dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
+       dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
 
-       dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
+       dwc3_writel(dep->regs, DWC3_DEPCMD, cmd | DWC3_DEPCMD_CMDACT);
        do {
-               reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
+               reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
                if (!(reg & DWC3_DEPCMD_CMDACT)) {
-                       int cmd_status = DWC3_DEPCMD_STATUS(reg);
-
-                       dwc3_trace(trace_dwc3_gadget,
-                                       "Command Complete --> %d",
-                                       cmd_status);
+                       cmd_status = DWC3_DEPCMD_STATUS(reg);
 
                        switch (cmd_status) {
                        case 0:
                                ret = 0;
                                break;
                        case DEPEVT_TRANSFER_NO_RESOURCE:
-                               dwc3_trace(trace_dwc3_gadget, "%s: no resource available");
                                ret = -EINVAL;
                                break;
                        case DEPEVT_TRANSFER_BUS_EXPIRY:
@@ -313,7 +308,6 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
                                 * give a hint to the gadget driver that this is
                                 * the case by returning -EAGAIN.
                                 */
-                               dwc3_trace(trace_dwc3_gadget, "%s: bus expiry");
                                ret = -EAGAIN;
                                break;
                        default:
@@ -322,21 +316,14 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
 
                        break;
                }
+       } while (--timeout);
 
-               /*
-                * We can't sleep here, because it is also called from
-                * interrupt context.
-                */
-               timeout--;
-               if (!timeout) {
-                       dwc3_trace(trace_dwc3_gadget,
-                                       "Command Timed Out");
-                       ret = -ETIMEDOUT;
-                       break;
-               }
+       if (timeout == 0) {
+               ret = -ETIMEDOUT;
+               cmd_status = -ETIMEDOUT;
+       }
 
-               udelay(1);
-       } while (1);
+       trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
 
        if (unlikely(susphy)) {
                reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
@@ -366,7 +353,7 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
 
        memset(&params, 0, sizeof(params));
 
-       return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+       return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
 }
 
 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
@@ -454,7 +441,7 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
        memset(&params, 0x00, sizeof(params));
        cmd = DWC3_DEPCMD_DEPSTARTCFG;
 
-       ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+       ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
        if (ret)
                return ret;
 
@@ -475,10 +462,14 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
 static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
                const struct usb_endpoint_descriptor *desc,
                const struct usb_ss_ep_comp_descriptor *comp_desc,
-               bool ignore, bool restore)
+               bool modify, bool restore)
 {
        struct dwc3_gadget_ep_cmd_params params;
 
+       if (dev_WARN_ONCE(dwc->dev, modify && restore,
+                                       "Can't modify and restore\n"))
+               return -EINVAL;
+
        memset(&params, 0x00, sizeof(params));
 
        params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
@@ -487,30 +478,22 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
        /* Burst size is only needed in SuperSpeed mode */
        if (dwc->gadget.speed >= USB_SPEED_SUPER) {
                u32 burst = dep->endpoint.maxburst;
-               u32 nump;
-               u32 reg;
-
-               /* update NumP */
-               reg = dwc3_readl(dwc->regs, DWC3_DCFG);
-               nump = DWC3_DCFG_NUMP(reg);
-               nump = max(nump, burst);
-               reg &= ~DWC3_DCFG_NUMP_MASK;
-               reg |= nump << DWC3_DCFG_NUMP_SHIFT;
-               dwc3_writel(dwc->regs, DWC3_DCFG, reg);
-
                params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1);
        }
 
-       if (ignore)
-               params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;
-
-       if (restore) {
+       if (modify) {
+               params.param0 |= DWC3_DEPCFG_ACTION_MODIFY;
+       } else if (restore) {
                params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
                params.param2 |= dep->saved_state;
+       } else {
+               params.param0 |= DWC3_DEPCFG_ACTION_INIT;
        }
 
-       params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
-               | DWC3_DEPCFG_XFER_NOT_READY_EN;
+       params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN;
+
+       if (dep->number <= 1 || usb_endpoint_xfer_isoc(desc))
+               params.param1 |= DWC3_DEPCFG_XFER_NOT_READY_EN;
 
        if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
                params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
@@ -541,8 +524,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
                dep->interval = 1 << (desc->bInterval - 1);
        }
 
-       return dwc3_send_gadget_ep_cmd(dwc, dep->number,
-                       DWC3_DEPCMD_SETEPCONFIG, &params);
+       return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
 }
 
 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
@@ -553,8 +535,8 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
 
        params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
 
-       return dwc3_send_gadget_ep_cmd(dwc, dep->number,
-                       DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
+       return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
+                       &params);
 }
 
 /**
@@ -567,7 +549,7 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
 static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
                const struct usb_endpoint_descriptor *desc,
                const struct usb_ss_ep_comp_descriptor *comp_desc,
-               bool ignore, bool restore)
+               bool modify, bool restore)
 {
        struct dwc3             *dwc = dep->dwc;
        u32                     reg;
@@ -581,7 +563,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
                        return ret;
        }
 
-       ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
+       ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
                        restore);
        if (ret)
                return ret;
@@ -600,38 +582,24 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
                dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
 
                if (usb_endpoint_xfer_control(desc))
-                       goto out;
+                       return 0;
+
+               /* Initialize the TRB ring */
+               dep->trb_dequeue = 0;
+               dep->trb_enqueue = 0;
+               memset(dep->trb_pool, 0,
+                      sizeof(struct dwc3_trb) * DWC3_TRB_NUM);
 
                /* Link TRB. The HWO bit is never reset */
                trb_st_hw = &dep->trb_pool[0];
 
                trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
-               memset(trb_link, 0, sizeof(*trb_link));
-
                trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
                trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
                trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
                trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
        }
 
-out:
-       switch (usb_endpoint_type(desc)) {
-       case USB_ENDPOINT_XFER_CONTROL:
-               /* don't change name */
-               break;
-       case USB_ENDPOINT_XFER_ISOC:
-               strlcat(dep->name, "-isoc", sizeof(dep->name));
-               break;
-       case USB_ENDPOINT_XFER_BULK:
-               strlcat(dep->name, "-bulk", sizeof(dep->name));
-               break;
-       case USB_ENDPOINT_XFER_INT:
-               strlcat(dep->name, "-int", sizeof(dep->name));
-               break;
-       default:
-               dev_err(dwc->dev, "invalid endpoint transfer type\n");
-       }
-
        return 0;
 }
 
@@ -640,15 +608,13 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
 {
        struct dwc3_request             *req;
 
-       if (!list_empty(&dep->started_list)) {
-               dwc3_stop_active_transfer(dwc, dep->number, true);
+       dwc3_stop_active_transfer(dwc, dep->number, true);
 
-               /* - giveback all requests to gadget driver */
-               while (!list_empty(&dep->started_list)) {
-                       req = next_request(&dep->started_list);
+       /* - giveback all requests to gadget driver */
+       while (!list_empty(&dep->started_list)) {
+               req = next_request(&dep->started_list);
 
-                       dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
-               }
+               dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
        }
 
        while (!list_empty(&dep->pending_list)) {
@@ -689,10 +655,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
        dep->type = 0;
        dep->flags = 0;
 
-       snprintf(dep->name, sizeof(dep->name), "ep%d%s",
-                       dep->number >> 1,
-                       (dep->number & 1) ? "in" : "out");
-
        return 0;
 }
 
@@ -784,6 +746,8 @@ static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
        req->epnum      = dep->number;
        req->dep        = dep;
 
+       dep->allocated_requests++;
+
        trace_dwc3_alloc_request(req);
 
        return &req->request;
@@ -793,7 +757,9 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
                struct usb_request *request)
 {
        struct dwc3_request             *req = to_dwc3_request(request);
+       struct dwc3_ep                  *dep = to_dwc3_ep(ep);
 
+       dep->allocated_requests--;
        trace_dwc3_free_request(req);
        kfree(req);
 }
@@ -825,9 +791,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
        }
 
        dwc3_ep_inc_enq(dep);
-       /* Skip the LINK-TRB */
-       if (dwc3_ep_is_last_trb(dep->trb_enqueue))
-               dwc3_ep_inc_enq(dep);
 
        trb->size = DWC3_TRB_SIZE_LENGTH(length);
        trb->bpl = lower_32_bits(dma);
@@ -877,137 +840,169 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
 
        trb->ctrl |= DWC3_TRB_CTRL_HWO;
 
+       dep->queued_requests++;
+
        trace_dwc3_prepare_trb(dep, trb);
 }
 
-/*
- * dwc3_prepare_trbs - setup TRBs from requests
- * @dep: endpoint for which requests are being prepared
- * @starting: true if the endpoint is idle and no requests are queued.
+/**
+ * dwc3_ep_prev_trb() - Returns the previous TRB in the ring
+ * @dep: The endpoint with the TRB ring
+ * @index: The index of the current TRB in the ring
  *
- * The function goes through the requests list and sets up TRBs for the
- * transfers. The function returns once there are no more TRBs available or
- * it runs out of requests.
+ * Returns the TRB prior to the one pointed to by the index. If the
+ * index is 0, we will wrap backwards, skip the link TRB, and return
+ * the one just before that.
  */
-static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
+static struct dwc3_trb *dwc3_ep_prev_trb(struct dwc3_ep *dep, u8 index)
 {
-       struct dwc3_request     *req, *n;
-       u32                     trbs_left;
-       unsigned int            last_one = 0;
+       if (!index)
+               index = DWC3_TRB_NUM - 2;
+       else
+               index = dep->trb_enqueue - 1;
 
-       BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
+       return &dep->trb_pool[index];
+}
 
-       trbs_left = dep->trb_dequeue - dep->trb_enqueue;
+static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep)
+{
+       struct dwc3_trb         *tmp;
+       u8                      trbs_left;
 
        /*
-        * If enqueue & dequeue are equal than it is either full or empty. If we
-        * are starting to process requests then we are empty. Otherwise we are
-        * full and don't do anything
+        * If enqueue & dequeue are equal then it is either full or empty.
+        *
+        * One way to know for sure is if the TRB right before us has HWO bit
+        * set or not. If it has, then we're definitely full and can't fit any
+        * more transfers in our ring.
         */
-       if (!trbs_left) {
-               if (!starting)
-                       return;
+       if (dep->trb_enqueue == dep->trb_dequeue) {
+               tmp = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
+               if (tmp->ctrl & DWC3_TRB_CTRL_HWO)
+                       return 0;
 
-               trbs_left = DWC3_TRB_NUM;
+               return DWC3_TRB_NUM - 1;
        }
 
-       /* The last TRB is a link TRB, not used for xfer */
-       if (trbs_left <= 1)
-               return;
+       trbs_left = dep->trb_dequeue - dep->trb_enqueue;
+       trbs_left &= (DWC3_TRB_NUM - 1);
 
-       list_for_each_entry_safe(req, n, &dep->pending_list, list) {
-               unsigned        length;
-               dma_addr_t      dma;
-               last_one = false;
-
-               if (req->request.num_mapped_sgs > 0) {
-                       struct usb_request *request = &req->request;
-                       struct scatterlist *sg = request->sg;
-                       struct scatterlist *s;
-                       int             i;
-
-                       for_each_sg(sg, s, request->num_mapped_sgs, i) {
-                               unsigned chain = true;
-
-                               length = sg_dma_len(s);
-                               dma = sg_dma_address(s);
-
-                               if (i == (request->num_mapped_sgs - 1) ||
-                                               sg_is_last(s)) {
-                                       if (list_empty(&dep->pending_list))
-                                               last_one = true;
-                                       chain = false;
-                               }
+       if (dep->trb_dequeue < dep->trb_enqueue)
+               trbs_left--;
 
-                               trbs_left--;
-                               if (!trbs_left)
-                                       last_one = true;
+       return trbs_left;
+}
 
-                               if (last_one)
-                                       chain = false;
+static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
+               struct dwc3_request *req, unsigned int trbs_left,
+               unsigned int more_coming)
+{
+       struct usb_request *request = &req->request;
+       struct scatterlist *sg = request->sg;
+       struct scatterlist *s;
+       unsigned int    last = false;
+       unsigned int    length;
+       dma_addr_t      dma;
+       int             i;
 
-                               dwc3_prepare_one_trb(dep, req, dma, length,
-                                               last_one, chain, i);
+       for_each_sg(sg, s, request->num_mapped_sgs, i) {
+               unsigned chain = true;
 
-                               if (last_one)
-                                       break;
-                       }
+               length = sg_dma_len(s);
+               dma = sg_dma_address(s);
 
-                       if (last_one)
-                               break;
-               } else {
-                       dma = req->request.dma;
-                       length = req->request.length;
-                       trbs_left--;
+               if (sg_is_last(s)) {
+                       if (usb_endpoint_xfer_int(dep->endpoint.desc) ||
+                               !more_coming)
+                               last = true;
 
-                       if (!trbs_left)
-                               last_one = 1;
+                       chain = false;
+               }
 
-                       /* Is this the last request? */
-                       if (list_is_last(&req->list, &dep->pending_list))
-                               last_one = 1;
+               if (!trbs_left--)
+                       last = true;
 
-                       dwc3_prepare_one_trb(dep, req, dma, length,
-                                       last_one, false, 0);
+               if (last)
+                       chain = false;
 
-                       if (last_one)
-                               break;
-               }
+               dwc3_prepare_one_trb(dep, req, dma, length,
+                               last, chain, i);
+
+               if (last)
+                       break;
        }
 }
 
-static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
-               int start_new)
+static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
+               struct dwc3_request *req, unsigned int trbs_left,
+               unsigned int more_coming)
+{
+       unsigned int    last = false;
+       unsigned int    length;
+       dma_addr_t      dma;
+
+       dma = req->request.dma;
+       length = req->request.length;
+
+       if (!trbs_left)
+               last = true;
+
+       /* Is this the last request? */
+       if (usb_endpoint_xfer_int(dep->endpoint.desc) || !more_coming)
+               last = true;
+
+       dwc3_prepare_one_trb(dep, req, dma, length,
+                       last, false, 0);
+}
+
+/*
+ * dwc3_prepare_trbs - setup TRBs from requests
+ * @dep: endpoint for which requests are being prepared
+ *
+ * The function goes through the requests list and sets up TRBs for the
+ * transfers. The function returns once there are no more TRBs available or
+ * it runs out of requests.
+ */
+static void dwc3_prepare_trbs(struct dwc3_ep *dep)
+{
+       struct dwc3_request     *req, *n;
+       unsigned int            more_coming;
+       u32                     trbs_left;
+
+       BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
+
+       trbs_left = dwc3_calc_trbs_left(dep);
+       if (!trbs_left)
+               return;
+
+       more_coming = dep->allocated_requests - dep->queued_requests;
+
+       list_for_each_entry_safe(req, n, &dep->pending_list, list) {
+               if (req->request.num_mapped_sgs > 0)
+                       dwc3_prepare_one_trb_sg(dep, req, trbs_left--,
+                                       more_coming);
+               else
+                       dwc3_prepare_one_trb_linear(dep, req, trbs_left--,
+                                       more_coming);
+
+               if (!trbs_left)
+                       return;
+       }
+}
+
+static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
 {
        struct dwc3_gadget_ep_cmd_params params;
        struct dwc3_request             *req;
        struct dwc3                     *dwc = dep->dwc;
+       int                             starting;
        int                             ret;
        u32                             cmd;
 
-       if (start_new && (dep->flags & DWC3_EP_BUSY)) {
-               dwc3_trace(trace_dwc3_gadget, "%s: endpoint busy", dep->name);
-               return -EBUSY;
-       }
-
-       /*
-        * If we are getting here after a short-out-packet we don't enqueue any
-        * new requests as we try to set the IOC bit only on the last request.
-        */
-       if (start_new) {
-               if (list_empty(&dep->started_list))
-                       dwc3_prepare_trbs(dep, start_new);
-
-               /* req points to the first request which will be sent */
-               req = next_request(&dep->started_list);
-       } else {
-               dwc3_prepare_trbs(dep, start_new);
+       starting = !(dep->flags & DWC3_EP_BUSY);
 
-               /*
-                * req points to the first request where HWO changed from 0 to 1
-                */
-               req = next_request(&dep->started_list);
-       }
+       dwc3_prepare_trbs(dep);
+       req = next_request(&dep->started_list);
        if (!req) {
                dep->flags |= DWC3_EP_PENDING_REQUEST;
                return 0;
@@ -1015,16 +1010,17 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
 
        memset(&params, 0, sizeof(params));
 
-       if (start_new) {
+       if (starting) {
                params.param0 = upper_32_bits(req->trb_dma);
                params.param1 = lower_32_bits(req->trb_dma);
-               cmd = DWC3_DEPCMD_STARTTRANSFER;
+               cmd = DWC3_DEPCMD_STARTTRANSFER |
+                       DWC3_DEPCMD_PARAM(cmd_param);
        } else {
-               cmd = DWC3_DEPCMD_UPDATETRANSFER;
+               cmd = DWC3_DEPCMD_UPDATETRANSFER |
+                       DWC3_DEPCMD_PARAM(dep->resource_index);
        }
 
-       cmd |= DWC3_DEPCMD_PARAM(cmd_param);
-       ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+       ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
        if (ret < 0) {
                /*
                 * FIXME we need to iterate over the list of requests
@@ -1039,9 +1035,8 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
 
        dep->flags |= DWC3_EP_BUSY;
 
-       if (start_new) {
-               dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
-                               dep->number);
+       if (starting) {
+               dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
                WARN_ON_ONCE(!dep->resource_index);
        }
 
@@ -1064,7 +1059,7 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
        /* 4 micro frames in the future */
        uf = cur_uf + dep->interval * 4;
 
-       __dwc3_gadget_kick_transfer(dep, uf, 1);
+       __dwc3_gadget_kick_transfer(dep, uf);
 }
 
 static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
@@ -1085,18 +1080,20 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 
        if (!dep->endpoint.desc) {
                dwc3_trace(trace_dwc3_gadget,
-                               "trying to queue request %p to disabled %s\n",
+                               "trying to queue request %p to disabled %s",
                                &req->request, dep->endpoint.name);
                return -ESHUTDOWN;
        }
 
        if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
                                &req->request, req->dep->name)) {
-               dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'\n",
+               dwc3_trace(trace_dwc3_gadget, "request %p belongs to '%s'",
                                &req->request, req->dep->name);
                return -EINVAL;
        }
 
+       pm_runtime_get(dwc->dev);
+
        req->request.actual     = 0;
        req->request.status     = -EINPROGRESS;
        req->direction          = dep->direction;
@@ -1131,9 +1128,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
         * little bit faster.
         */
        if (!usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
-                       !usb_endpoint_xfer_int(dep->endpoint.desc) &&
-                       !(dep->flags & DWC3_EP_BUSY)) {
-               ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+                       !usb_endpoint_xfer_int(dep->endpoint.desc)) {
+               ret = __dwc3_gadget_kick_transfer(dep, 0);
                goto out;
        }
 
@@ -1163,7 +1159,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
                        return 0;
                }
 
-               ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+               ret = __dwc3_gadget_kick_transfer(dep, 0);
                if (!ret)
                        dep->flags &= ~DWC3_EP_PENDING_REQUEST;
 
@@ -1179,8 +1175,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
                        (dep->flags & DWC3_EP_BUSY) &&
                        !(dep->flags & DWC3_EP_MISSED_ISOC)) {
                WARN_ON_ONCE(!dep->resource_index);
-               ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
-                               false);
+               ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index);
                goto out;
        }
 
@@ -1190,12 +1185,12 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
         * handled.
         */
        if (dep->stream_capable)
-               ret = __dwc3_gadget_kick_transfer(dep, 0, true);
+               ret = __dwc3_gadget_kick_transfer(dep, 0);
 
 out:
        if (ret && ret != -EBUSY)
                dwc3_trace(trace_dwc3_gadget,
-                               "%s: failed to kick transfers\n",
+                               "%s: failed to kick transfers",
                                dep->name);
        if (ret == -EBUSY)
                ret = 0;
@@ -1215,7 +1210,7 @@ static int __dwc3_gadget_ep_queue_zlp(struct dwc3 *dwc, struct dwc3_ep *dep)
        struct usb_request              *request;
        struct usb_ep                   *ep = &dep->endpoint;
 
-       dwc3_trace(trace_dwc3_gadget, "queueing ZLP\n");
+       dwc3_trace(trace_dwc3_gadget, "queueing ZLP");
        request = dwc3_gadget_ep_alloc_request(ep, GFP_ATOMIC);
        if (!request)
                return -ENOMEM;
@@ -1319,23 +1314,36 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
        memset(&params, 0x00, sizeof(params));
 
        if (value) {
-               if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
-                               (!list_empty(&dep->started_list) ||
-                                !list_empty(&dep->pending_list)))) {
+               struct dwc3_trb *trb;
+
+               unsigned transfer_in_flight;
+               unsigned started;
+
+               if (dep->number > 1)
+                       trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
+               else
+                       trb = &dwc->ep0_trb[dep->trb_enqueue];
+
+               transfer_in_flight = trb->ctrl & DWC3_TRB_CTRL_HWO;
+               started = !list_empty(&dep->started_list);
+
+               if (!protocol && ((dep->direction && transfer_in_flight) ||
+                               (!dep->direction && started))) {
                        dwc3_trace(trace_dwc3_gadget,
                                        "%s: pending request, cannot halt",
                                        dep->name);
                        return -EAGAIN;
                }
 
-               ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
-                       DWC3_DEPCMD_SETSTALL, &params);
+               ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETSTALL,
+                               &params);
                if (ret)
                        dev_err(dwc->dev, "failed to set STALL on %s\n",
                                        dep->name);
                else
                        dep->flags |= DWC3_EP_STALL;
        } else {
+
                ret = dwc3_send_clear_stall_ep_cmd(dep);
                if (ret)
                        dev_err(dwc->dev, "failed to clear STALL on %s\n",
@@ -1444,8 +1452,8 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
        speed = reg & DWC3_DSTS_CONNECTSPD;
        if ((speed == DWC3_DSTS_SUPERSPEED) ||
            (speed == DWC3_DSTS_SUPERSPEED_PLUS)) {
-               dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed\n");
-               return -EINVAL;
+               dwc3_trace(trace_dwc3_gadget, "no wakeup on SuperSpeed");
+               return 0;
        }
 
        link_state = DWC3_DSTS_USBLNKST(reg);
@@ -1456,7 +1464,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
                break;
        default:
                dwc3_trace(trace_dwc3_gadget,
-                               "can't wakeup from '%s'\n",
+                               "can't wakeup from '%s'",
                                dwc3_gadget_link_string(link_state));
                return -EINVAL;
        }
@@ -1525,6 +1533,9 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
        u32                     reg;
        u32                     timeout = 500;
 
+       if (pm_runtime_suspended(dwc->dev))
+               return 0;
+
        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
        if (is_on) {
                if (dwc->revision <= DWC3_REVISION_187A) {
@@ -1553,18 +1564,11 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
 
        do {
                reg = dwc3_readl(dwc->regs, DWC3_DSTS);
-               if (is_on) {
-                       if (!(reg & DWC3_DSTS_DEVCTRLHLT))
-                               break;
-               } else {
-                       if (reg & DWC3_DSTS_DEVCTRLHLT)
-                               break;
-               }
-               timeout--;
-               if (!timeout)
-                       return -ETIMEDOUT;
-               udelay(1);
-       } while (1);
+               reg &= DWC3_DSTS_DEVCTRLHLT;
+       } while (--timeout && !(!is_on ^ !reg));
+
+       if (!timeout)
+               return -ETIMEDOUT;
 
        dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
                        dwc->gadget_driver
@@ -1616,36 +1620,52 @@ static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
 static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
 
-static int dwc3_gadget_start(struct usb_gadget *g,
-               struct usb_gadget_driver *driver)
+/**
+ * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
+ * dwc: pointer to our context structure
+ *
+ * The following looks like complex but it's actually very simple. In order to
+ * calculate the number of packets we can burst at once on OUT transfers, we're
+ * gonna use RxFIFO size.
+ *
+ * To calculate RxFIFO size we need two numbers:
+ * MDWIDTH = size, in bits, of the internal memory bus
+ * RAM2_DEPTH = depth, in MDWIDTH, of internal RAM2 (where RxFIFO sits)
+ *
+ * Given these two numbers, the formula is simple:
+ *
+ * RxFIFO Size = (RAM2_DEPTH * MDWIDTH / 8) - 24 - 16;
+ *
+ * 24 bytes is for 3x SETUP packets
+ * 16 bytes is a clock domain crossing tolerance
+ *
+ * Given RxFIFO Size, NUMP = RxFIFOSize / 1024;
+ */
+static void dwc3_gadget_setup_nump(struct dwc3 *dwc)
 {
-       struct dwc3             *dwc = gadget_to_dwc(g);
-       struct dwc3_ep          *dep;
-       unsigned long           flags;
-       int                     ret = 0;
-       int                     irq;
-       u32                     reg;
+       u32 ram2_depth;
+       u32 mdwidth;
+       u32 nump;
+       u32 reg;
 
-       irq = platform_get_irq(to_platform_device(dwc->dev), 0);
-       ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
-                       IRQF_SHARED, "dwc3", dwc->ev_buf);
-       if (ret) {
-               dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
-                               irq, ret);
-               goto err0;
-       }
+       ram2_depth = DWC3_GHWPARAMS7_RAM2_DEPTH(dwc->hwparams.hwparams7);
+       mdwidth = DWC3_GHWPARAMS0_MDWIDTH(dwc->hwparams.hwparams0);
 
-       spin_lock_irqsave(&dwc->lock, flags);
+       nump = ((ram2_depth * mdwidth / 8) - 24 - 16) / 1024;
+       nump = min_t(u32, nump, 16);
 
-       if (dwc->gadget_driver) {
-               dev_err(dwc->dev, "%s is already bound to %s\n",
-                               dwc->gadget.name,
-                               dwc->gadget_driver->driver.name);
-               ret = -EBUSY;
-               goto err1;
-       }
+       /* update NumP */
+       reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+       reg &= ~DWC3_DCFG_NUMP_MASK;
+       reg |= nump << DWC3_DCFG_NUMP_SHIFT;
+       dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
 
-       dwc->gadget_driver      = driver;
+static int __dwc3_gadget_start(struct dwc3 *dwc)
+{
+       struct dwc3_ep          *dep;
+       int                     ret = 0;
+       u32                     reg;
 
        reg = dwc3_readl(dwc->regs, DWC3_DCFG);
        reg &= ~(DWC3_DCFG_SPEED_MASK);
@@ -1668,16 +1688,16 @@ static int dwc3_gadget_start(struct usb_gadget *g,
        } else {
                switch (dwc->maximum_speed) {
                case USB_SPEED_LOW:
-                       reg |= DWC3_DSTS_LOWSPEED;
+                       reg |= DWC3_DCFG_LOWSPEED;
                        break;
                case USB_SPEED_FULL:
-                       reg |= DWC3_DSTS_FULLSPEED1;
+                       reg |= DWC3_DCFG_FULLSPEED1;
                        break;
                case USB_SPEED_HIGH:
-                       reg |= DWC3_DSTS_HIGHSPEED;
+                       reg |= DWC3_DCFG_HIGHSPEED;
                        break;
                case USB_SPEED_SUPER_PLUS:
-                       reg |= DWC3_DSTS_SUPERSPEED_PLUS;
+                       reg |= DWC3_DCFG_SUPERSPEED_PLUS;
                        break;
                default:
                        dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
@@ -1701,6 +1721,8 @@ static int dwc3_gadget_start(struct usb_gadget *g,
        reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
        dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
 
+       dwc3_gadget_setup_nump(dwc);
+
        /* Start with SuperSpeed Default */
        dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 
@@ -1709,7 +1731,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
                        false);
        if (ret) {
                dev_err(dwc->dev, "failed to enable %s\n", dep->name);
-               goto err2;
+               goto err0;
        }
 
        dep = dwc->eps[1];
@@ -1717,7 +1739,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
                        false);
        if (ret) {
                dev_err(dwc->dev, "failed to enable %s\n", dep->name);
-               goto err3;
+               goto err1;
        }
 
        /* begin to receive SETUP packets */
@@ -1726,43 +1748,79 @@ static int dwc3_gadget_start(struct usb_gadget *g,
 
        dwc3_gadget_enable_irq(dwc);
 
-       spin_unlock_irqrestore(&dwc->lock, flags);
-
        return 0;
 
-err3:
-       __dwc3_gadget_ep_disable(dwc->eps[0]);
-
-err2:
-       dwc->gadget_driver = NULL;
-
 err1:
-       spin_unlock_irqrestore(&dwc->lock, flags);
-
-       free_irq(irq, dwc->ev_buf);
+       __dwc3_gadget_ep_disable(dwc->eps[0]);
 
 err0:
        return ret;
 }
 
-static int dwc3_gadget_stop(struct usb_gadget *g)
+static int dwc3_gadget_start(struct usb_gadget *g,
+               struct usb_gadget_driver *driver)
 {
        struct dwc3             *dwc = gadget_to_dwc(g);
        unsigned long           flags;
+       int                     ret = 0;
        int                     irq;
 
+       irq = dwc->irq_gadget;
+       ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
+                       IRQF_SHARED, "dwc3", dwc->ev_buf);
+       if (ret) {
+               dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+                               irq, ret);
+               goto err0;
+       }
+
        spin_lock_irqsave(&dwc->lock, flags);
+       if (dwc->gadget_driver) {
+               dev_err(dwc->dev, "%s is already bound to %s\n",
+                               dwc->gadget.name,
+                               dwc->gadget_driver->driver.name);
+               ret = -EBUSY;
+               goto err1;
+       }
+
+       dwc->gadget_driver      = driver;
+
+       if (pm_runtime_active(dwc->dev))
+               __dwc3_gadget_start(dwc);
+
+       spin_unlock_irqrestore(&dwc->lock, flags);
+
+       return 0;
+
+err1:
+       spin_unlock_irqrestore(&dwc->lock, flags);
+       free_irq(irq, dwc);
+
+err0:
+       return ret;
+}
+
+static void __dwc3_gadget_stop(struct dwc3 *dwc)
+{
+       if (pm_runtime_suspended(dwc->dev))
+               return;
 
        dwc3_gadget_disable_irq(dwc);
        __dwc3_gadget_ep_disable(dwc->eps[0]);
        __dwc3_gadget_ep_disable(dwc->eps[1]);
+}
 
-       dwc->gadget_driver      = NULL;
+static int dwc3_gadget_stop(struct usb_gadget *g)
+{
+       struct dwc3             *dwc = gadget_to_dwc(g);
+       unsigned long           flags;
 
+       spin_lock_irqsave(&dwc->lock, flags);
+       __dwc3_gadget_stop(dwc);
+       dwc->gadget_driver      = NULL;
        spin_unlock_irqrestore(&dwc->lock, flags);
 
-       irq = platform_get_irq(to_platform_device(dwc->dev), 0);
-       free_irq(irq, dwc->ev_buf);
+       free_irq(dwc->irq_gadget, dwc->ev_buf);
 
        return 0;
 }
@@ -1785,7 +1843,7 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
        u8                              i;
 
        for (i = 0; i < num; i++) {
-               u8 epnum = (i << 1) | (!!direction);
+               u8 epnum = (i << 1) | (direction ? 1 : 0);
 
                dep = kzalloc(sizeof(*dep), GFP_KERNEL);
                if (!dep)
@@ -1794,12 +1852,14 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
                dep->dwc = dwc;
                dep->number = epnum;
                dep->direction = !!direction;
+               dep->regs = dwc->regs + DWC3_DEP_BASE(epnum);
                dwc->eps[epnum] = dep;
 
                snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
                                (epnum & 1) ? "in" : "out");
 
                dep->endpoint.name = dep->name;
+               spin_lock_init(&dep->lock);
 
                dwc3_trace(trace_dwc3_gadget, "initializing %s", dep->name);
 
@@ -1901,6 +1961,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
        unsigned int            s_pkt = 0;
        unsigned int            trb_status;
 
+       dep->queued_requests--;
        trace_dwc3_complete_trb(dep, trb);
 
        if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
@@ -1921,7 +1982,7 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
                        trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
                        if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
                                dwc3_trace(trace_dwc3_gadget,
-                                               "%s: incomplete IN transfer\n",
+                                               "%s: incomplete IN transfer",
                                                dep->name);
                                /*
                                 * If missed isoc occurred and there is
@@ -2006,6 +2067,14 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
                        break;
        } while (1);
 
+       /*
+        * Our endpoint might get disabled by another thread during
+        * dwc3_gadget_giveback(). If that happens, we're just gonna return 1
+        * early on so DWC3_EP_BUSY flag gets cleared
+        */
+       if (!dep->endpoint.desc)
+               return 1;
+
        if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
                        list_empty(&dep->started_list)) {
                if (list_empty(&dep->pending_list)) {
@@ -2023,6 +2092,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
                return 1;
        }
 
+       if (usb_endpoint_xfer_isoc(dep->endpoint.desc))
+               if ((event->status & DEPEVT_STATUS_IOC) &&
+                               (trb->ctrl & DWC3_TRB_CTRL_IOC))
+                       return 0;
        return 1;
 }
 
@@ -2039,7 +2112,7 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
                status = -ECONNRESET;
 
        clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
-       if (clean_busy && (is_xfer_complete ||
+       if (clean_busy && (!dep->endpoint.desc || is_xfer_complete ||
                                usb_endpoint_xfer_isoc(dep->endpoint.desc)))
                dep->flags &= ~DWC3_EP_BUSY;
 
@@ -2068,10 +2141,18 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
                dwc->u1u2 = 0;
        }
 
+       /*
+        * Our endpoint might get disabled by another thread during
+        * dwc3_gadget_giveback(). If that happens, we're just gonna return 1
+        * early on so DWC3_EP_BUSY flag gets cleared
+        */
+       if (!dep->endpoint.desc)
+               return;
+
        if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
                int ret;
 
-               ret = __dwc3_gadget_kick_transfer(dep, 0, is_xfer_complete);
+               ret = __dwc3_gadget_kick_transfer(dep, 0);
                if (!ret || ret == -EBUSY)
                        return;
        }
@@ -2099,7 +2180,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
 
                if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
                        dwc3_trace(trace_dwc3_gadget,
-                                       "%s is an Isochronous endpoint\n",
+                                       "%s is an Isochronous endpoint",
                                        dep->name);
                        return;
                }
@@ -2122,12 +2203,12 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
                                        dep->name, active ? "Transfer Active"
                                        : "Transfer Not Active");
 
-                       ret = __dwc3_gadget_kick_transfer(dep, 0, !active);
+                       ret = __dwc3_gadget_kick_transfer(dep, 0);
                        if (!ret || ret == -EBUSY)
                                return;
 
                        dwc3_trace(trace_dwc3_gadget,
-                                       "%s: failed to kick transfers\n",
+                                       "%s: failed to kick transfers",
                                        dep->name);
                }
 
@@ -2150,11 +2231,11 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
                        /* FALLTHROUGH */
                default:
                        dwc3_trace(trace_dwc3_gadget,
-                                       "unable to find suitable stream\n");
+                                       "unable to find suitable stream");
                }
                break;
        case DWC3_DEPEVT_RXTXFIFOEVT:
-               dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun\n", dep->name);
+               dwc3_trace(trace_dwc3_gadget, "%s FIFO Overrun", dep->name);
                break;
        case DWC3_DEPEVT_EPCMDCMPLT:
                dwc3_trace(trace_dwc3_gadget, "Endpoint Command Complete");
@@ -2237,7 +2318,7 @@ static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
        cmd |= DWC3_DEPCMD_CMDIOC;
        cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
        memset(&params, 0, sizeof(params));
-       ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+       ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
        WARN_ON_ONCE(ret);
        dep->resource_index = 0;
        dep->flags &= ~DWC3_EP_BUSY;
@@ -2300,12 +2381,16 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
        dwc->gadget.speed = USB_SPEED_UNKNOWN;
        dwc->setup_packet_pending = false;
        usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
+
+       dwc->connected = false;
 }
 
 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 {
        u32                     reg;
 
+       dwc->connected = true;
+
        /*
         * WORKAROUND: DWC3 revisions <1.88a have an issue which
         * would cause a missing Disconnect Event if there's a
@@ -2393,12 +2478,12 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
        dwc3_update_ram_clk_sel(dwc, speed);
 
        switch (speed) {
-       case DWC3_DCFG_SUPERSPEED_PLUS:
+       case DWC3_DSTS_SUPERSPEED_PLUS:
                dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
                dwc->gadget.ep0->maxpacket = 512;
                dwc->gadget.speed = USB_SPEED_SUPER_PLUS;
                break;
-       case DWC3_DCFG_SUPERSPEED:
+       case DWC3_DSTS_SUPERSPEED:
                /*
                 * WORKAROUND: DWC3 revisions <1.90a have an issue which
                 * would cause a missing USB3 Reset event.
@@ -2419,18 +2504,18 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
                dwc->gadget.ep0->maxpacket = 512;
                dwc->gadget.speed = USB_SPEED_SUPER;
                break;
-       case DWC3_DCFG_HIGHSPEED:
+       case DWC3_DSTS_HIGHSPEED:
                dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
                dwc->gadget.ep0->maxpacket = 64;
                dwc->gadget.speed = USB_SPEED_HIGH;
                break;
-       case DWC3_DCFG_FULLSPEED2:
-       case DWC3_DCFG_FULLSPEED1:
+       case DWC3_DSTS_FULLSPEED2:
+       case DWC3_DSTS_FULLSPEED1:
                dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
                dwc->gadget.ep0->maxpacket = 64;
                dwc->gadget.speed = USB_SPEED_FULL;
                break;
-       case DWC3_DCFG_LOWSPEED:
+       case DWC3_DSTS_LOWSPEED:
                dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
                dwc->gadget.ep0->maxpacket = 8;
                dwc->gadget.speed = USB_SPEED_LOW;
@@ -2440,8 +2525,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
        /* Enable USB2 LPM Capability */
 
        if ((dwc->revision > DWC3_REVISION_194A) &&
-           (speed != DWC3_DCFG_SUPERSPEED) &&
-           (speed != DWC3_DCFG_SUPERSPEED_PLUS)) {
+           (speed != DWC3_DSTS_SUPERSPEED) &&
+           (speed != DWC3_DSTS_SUPERSPEED_PLUS)) {
                reg = dwc3_readl(dwc->regs, DWC3_DCFG);
                reg |= DWC3_DCFG_LPM_CAP;
                dwc3_writel(dwc->regs, DWC3_DCFG, reg);
@@ -2610,6 +2695,17 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
        dwc->link_state = next;
 }
 
+static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
+                                         unsigned int evtinfo)
+{
+       enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+
+       if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
+               dwc3_suspend_gadget(dwc);
+
+       dwc->link_state = next;
+}
+
 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
                unsigned int evtinfo)
 {
@@ -2661,7 +2757,20 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
                dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
                break;
        case DWC3_DEVICE_EVENT_EOPF:
-               dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
+               /* It changed to be suspend event for version 2.30a and above */
+               if (dwc->revision < DWC3_REVISION_230A) {
+                       dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
+               } else {
+                       dwc3_trace(trace_dwc3_gadget, "U3/L1-L2 Suspend Event");
+
+                       /*
+                        * Ignore suspend event until the gadget enters into
+                        * USB_STATE_CONFIGURED state.
+                        */
+                       if (dwc->gadget.state >= USB_STATE_CONFIGURED)
+                               dwc3_gadget_suspend_interrupt(dwc,
+                                               event->event_info);
+               }
                break;
        case DWC3_DEVICE_EVENT_SOF:
                dwc3_trace(trace_dwc3_gadget, "Start of Periodic Frame");
@@ -2767,6 +2876,13 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
        u32 count;
        u32 reg;
 
+       if (pm_runtime_suspended(dwc->dev)) {
+               pm_runtime_get(dwc->dev);
+               disable_irq_nosync(dwc->irq_gadget);
+               dwc->pending_events = true;
+               return IRQ_HANDLED;
+       }
+
        count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
        count &= DWC3_GEVNTCOUNT_MASK;
        if (!count)
@@ -2798,7 +2914,33 @@ static irqreturn_t dwc3_interrupt(int irq, void *_evt)
  */
 int dwc3_gadget_init(struct dwc3 *dwc)
 {
-       int                                     ret;
+       int ret, irq;
+       struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
+
+       irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+       if (irq == -EPROBE_DEFER)
+               return irq;
+
+       if (irq <= 0) {
+               irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+               if (irq == -EPROBE_DEFER)
+                       return irq;
+
+               if (irq <= 0) {
+                       irq = platform_get_irq(dwc3_pdev, 0);
+                       if (irq <= 0) {
+                               if (irq != -EPROBE_DEFER) {
+                                       dev_err(dwc->dev,
+                                               "missing peripheral IRQ\n");
+                               }
+                               if (!irq)
+                                       irq = -EINVAL;
+                               return irq;
+                       }
+               }
+       }
+
+       dwc->irq_gadget = irq;
 
        dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
                        &dwc->ctrl_req_addr, GFP_KERNEL);
@@ -2861,7 +3003,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
         */
        if (dwc->revision < DWC3_REVISION_220A)
                dwc3_trace(trace_dwc3_gadget,
-                               "Changing max_speed on rev %08x\n",
+                               "Changing max_speed on rev %08x",
                                dwc->revision);
 
        dwc->gadget.max_speed           = dwc->maximum_speed;
@@ -2935,61 +3077,50 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
+       int ret;
+
        if (!dwc->gadget_driver)
                return 0;
 
-       if (dwc->pullups_connected) {
-               dwc3_gadget_disable_irq(dwc);
-               dwc3_gadget_run_stop(dwc, true, true);
-       }
-
-       __dwc3_gadget_ep_disable(dwc->eps[0]);
-       __dwc3_gadget_ep_disable(dwc->eps[1]);
+       ret = dwc3_gadget_run_stop(dwc, false, false);
+       if (ret < 0)
+               return ret;
 
-       dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
+       dwc3_disconnect_gadget(dwc);
+       __dwc3_gadget_stop(dwc);
 
        return 0;
 }
 
 int dwc3_gadget_resume(struct dwc3 *dwc)
 {
-       struct dwc3_ep          *dep;
        int                     ret;
 
        if (!dwc->gadget_driver)
                return 0;
 
-       /* Start with SuperSpeed Default */
-       dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
-
-       dep = dwc->eps[0];
-       ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
-                       false);
-       if (ret)
+       ret = __dwc3_gadget_start(dwc);
+       if (ret < 0)
                goto err0;
 
-       dep = dwc->eps[1];
-       ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
-                       false);
-       if (ret)
+       ret = dwc3_gadget_run_stop(dwc, true, false);
+       if (ret < 0)
                goto err1;
 
-       /* begin to receive SETUP packets */
-       dwc->ep0state = EP0_SETUP_PHASE;
-       dwc3_ep0_out_start(dwc);
-
-       dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
-
-       if (dwc->pullups_connected) {
-               dwc3_gadget_enable_irq(dwc);
-               dwc3_gadget_run_stop(dwc, true, false);
-       }
-
        return 0;
 
 err1:
-       __dwc3_gadget_ep_disable(dwc->eps[0]);
+       __dwc3_gadget_stop(dwc);
 
 err0:
        return ret;
 }
+
+void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
+{
+       if (dwc->pending_events) {
+               dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf);
+               dwc->pending_events = false;
+               enable_irq(dwc->irq_gadget);
+       }
+}
index f21c0fccbebdf92545c9f1ff7b8ae6415acc130b..e4a1d974a5aea209fbea745280bb70cada700883 100644 (file)
@@ -95,11 +95,11 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
  *
  * Caller should take care of locking
  */
-static inline u32 dwc3_gadget_ep_get_transfer_index(struct dwc3 *dwc, u8 number)
+static inline u32 dwc3_gadget_ep_get_transfer_index(struct dwc3_ep *dep)
 {
        u32                     res_id;
 
-       res_id = dwc3_readl(dwc->regs, DWC3_DEPCMD(number));
+       res_id = dwc3_readl(dep->regs, DWC3_DEPCMD);
 
        return DWC3_DEPCMD_GET_RSC_IDX(res_id);
 }
index c679f63783aec8c1a0fe4e43bef80546a55abf44..f6533c68fed1e0dcc73b1af4b2f7948a6313fb7e 100644 (file)
  */
 
 #include <linux/platform_device.h>
-#include <linux/usb/xhci_pdriver.h>
 
 #include "core.h"
 
 int dwc3_host_init(struct dwc3 *dwc)
 {
+       struct property_entry   props[2];
        struct platform_device  *xhci;
-       struct usb_xhci_pdata   pdata;
-       int                     ret;
+       int                     ret, irq;
+       struct resource         *res;
+       struct platform_device  *dwc3_pdev = to_platform_device(dwc->dev);
+
+       irq = platform_get_irq_byname(dwc3_pdev, "host");
+       if (irq == -EPROBE_DEFER)
+               return irq;
+
+       if (irq <= 0) {
+               irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+               if (irq == -EPROBE_DEFER)
+                       return irq;
+
+               if (irq <= 0) {
+                       irq = platform_get_irq(dwc3_pdev, 0);
+                       if (irq <= 0) {
+                               if (irq != -EPROBE_DEFER) {
+                                       dev_err(dwc->dev,
+                                               "missing host IRQ\n");
+                               }
+                               if (!irq)
+                                       irq = -EINVAL;
+                               return irq;
+                       } else {
+                               res = platform_get_resource(dwc3_pdev,
+                                                           IORESOURCE_IRQ, 0);
+                       }
+               } else {
+                       res = platform_get_resource_byname(dwc3_pdev,
+                                                          IORESOURCE_IRQ,
+                                                          "dwc_usb3");
+               }
+
+       } else {
+               res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
+                                                  "host");
+       }
+
+       dwc->xhci_resources[1].start = irq;
+       dwc->xhci_resources[1].end = irq;
+       dwc->xhci_resources[1].flags = res->flags;
+       dwc->xhci_resources[1].name = res->name;
 
        xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
        if (!xhci) {
@@ -47,14 +87,15 @@ int dwc3_host_init(struct dwc3 *dwc)
                goto err1;
        }
 
-       memset(&pdata, 0, sizeof(pdata));
-
-       pdata.usb3_lpm_capable = dwc->usb3_lpm_capable;
+       memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
 
-       ret = platform_device_add_data(xhci, &pdata, sizeof(pdata));
-       if (ret) {
-               dev_err(dwc->dev, "couldn't add platform data to xHCI device\n");
-               goto err1;
+       if (dwc->usb3_lpm_capable) {
+               props[0].name = "usb3-lpm-capable";
+               ret = platform_device_add_properties(xhci, props);
+               if (ret) {
+                       dev_err(dwc->dev, "failed to add properties to xHCI\n");
+                       goto err1;
+               }
        }
 
        phy_create_lookup(dwc->usb2_generic_phy, "usb2-phy",
index 6a79c8e66bbcce87ae1b0d7f38bd345687504894..a06f9a8fecc701c99c370150416bdf5351b4d3d8 100644 (file)
@@ -26,7 +26,6 @@
 
 static inline u32 dwc3_readl(void __iomem *base, u32 offset)
 {
-       u32 offs = offset - DWC3_GLOBALS_REGS_START;
        u32 value;
 
        /*
@@ -34,7 +33,7 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
         * space, see dwc3_probe in core.c.
         * However, the offsets are given starting from xHCI address space.
         */
-       value = readl(base + offs);
+       value = readl(base + offset - DWC3_GLOBALS_REGS_START);
 
        /*
         * When tracing we want to make it easy to find the correct address on
@@ -49,14 +48,12 @@ static inline u32 dwc3_readl(void __iomem *base, u32 offset)
 
 static inline void dwc3_writel(void __iomem *base, u32 offset, u32 value)
 {
-       u32 offs = offset - DWC3_GLOBALS_REGS_START;
-
        /*
         * We requested the mem region starting from the Globals address
         * space, see dwc3_probe in core.c.
         * However, the offsets are given starting from xHCI address space.
         */
-       writel(value, base + offs);
+       writel(value, base + offset - DWC3_GLOBALS_REGS_START);
 
        /*
         * When tracing we want to make it easy to find the correct address on
diff --git a/drivers/usb/dwc3/platform_data.h b/drivers/usb/dwc3/platform_data.h
deleted file mode 100644 (file)
index 8826cca..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * platform_data.h - USB DWC3 Platform Data Support
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
- * Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2  of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/usb/ch9.h>
-#include <linux/usb/otg.h>
-
-struct dwc3_platform_data {
-       enum usb_device_speed maximum_speed;
-       enum usb_dr_mode dr_mode;
-       bool usb3_lpm_capable;
-
-       unsigned is_utmi_l1_suspend:1;
-       u8 hird_threshold;
-
-       u8 lpm_nyet_threshold;
-
-       unsigned disable_scramble_quirk:1;
-       unsigned has_lpm_erratum:1;
-       unsigned u2exit_lfps_quirk:1;
-       unsigned u2ss_inp3_quirk:1;
-       unsigned req_p1p2p3_quirk:1;
-       unsigned del_p1p2p3_quirk:1;
-       unsigned del_phy_power_chg_quirk:1;
-       unsigned lfps_filter_quirk:1;
-       unsigned rx_detect_poll_quirk:1;
-       unsigned dis_u3_susphy_quirk:1;
-       unsigned dis_u2_susphy_quirk:1;
-       unsigned dis_enblslpm_quirk:1;
-       unsigned dis_rxdet_inp3_quirk:1;
-
-       unsigned tx_de_emphasis_quirk:1;
-       unsigned tx_de_emphasis:2;
-
-       u32 fladj_value;
-
-       const char *hsphy_interface;
-};
index 3ac7252f44275e7bb9287f92c5fc946d7063967f..d24cefd191b558e8452beab674130b7ff49b893d 100644 (file)
@@ -71,7 +71,8 @@ DECLARE_EVENT_CLASS(dwc3_log_event,
        TP_fast_assign(
                __entry->event = event;
        ),
-       TP_printk("event %08x", __entry->event)
+       TP_printk("event (%08x): %s", __entry->event,
+                       dwc3_decode_event(__entry->event))
 );
 
 DEFINE_EVENT(dwc3_log_event, dwc3_event,
@@ -85,21 +86,21 @@ DECLARE_EVENT_CLASS(dwc3_log_ctrl,
        TP_STRUCT__entry(
                __field(__u8, bRequestType)
                __field(__u8, bRequest)
-               __field(__le16, wValue)
-               __field(__le16, wIndex)
-               __field(__le16, wLength)
+               __field(__u16, wValue)
+               __field(__u16, wIndex)
+               __field(__u16, wLength)
        ),
        TP_fast_assign(
                __entry->bRequestType = ctrl->bRequestType;
                __entry->bRequest = ctrl->bRequest;
-               __entry->wValue = ctrl->wValue;
-               __entry->wIndex = ctrl->wIndex;
-               __entry->wLength = ctrl->wLength;
+               __entry->wValue = le16_to_cpu(ctrl->wValue);
+               __entry->wIndex = le16_to_cpu(ctrl->wIndex);
+               __entry->wLength = le16_to_cpu(ctrl->wLength);
        ),
        TP_printk("bRequestType %02x bRequest %02x wValue %04x wIndex %04x wLength %d",
                __entry->bRequestType, __entry->bRequest,
-               le16_to_cpu(__entry->wValue), le16_to_cpu(__entry->wIndex),
-               le16_to_cpu(__entry->wLength)
+               __entry->wValue, __entry->wIndex,
+               __entry->wLength
        )
 );
 
@@ -166,37 +167,41 @@ DEFINE_EVENT(dwc3_log_request, dwc3_gadget_giveback,
 );
 
 DECLARE_EVENT_CLASS(dwc3_log_generic_cmd,
-       TP_PROTO(unsigned int cmd, u32 param),
-       TP_ARGS(cmd, param),
+       TP_PROTO(unsigned int cmd, u32 param, int status),
+       TP_ARGS(cmd, param, status),
        TP_STRUCT__entry(
                __field(unsigned int, cmd)
                __field(u32, param)
+               __field(int, status)
        ),
        TP_fast_assign(
                __entry->cmd = cmd;
                __entry->param = param;
+               __entry->status = status;
        ),
-       TP_printk("cmd '%s' [%d] param %08x",
+       TP_printk("cmd '%s' [%d] param %08x --> status: %s",
                dwc3_gadget_generic_cmd_string(__entry->cmd),
-               __entry->cmd, __entry->param
+               __entry->cmd, __entry->param,
+               dwc3_gadget_generic_cmd_status_string(__entry->status)
        )
 );
 
 DEFINE_EVENT(dwc3_log_generic_cmd, dwc3_gadget_generic_cmd,
-       TP_PROTO(unsigned int cmd, u32 param),
-       TP_ARGS(cmd, param)
+       TP_PROTO(unsigned int cmd, u32 param, int status),
+       TP_ARGS(cmd, param, status)
 );
 
 DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
        TP_PROTO(struct dwc3_ep *dep, unsigned int cmd,
-               struct dwc3_gadget_ep_cmd_params *params),
-       TP_ARGS(dep, cmd, params),
+               struct dwc3_gadget_ep_cmd_params *params, int cmd_status),
+       TP_ARGS(dep, cmd, params, cmd_status),
        TP_STRUCT__entry(
                __dynamic_array(char, name, DWC3_MSG_MAX)
                __field(unsigned int, cmd)
                __field(u32, param0)
                __field(u32, param1)
                __field(u32, param2)
+               __field(int, cmd_status)
        ),
        TP_fast_assign(
                snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
@@ -204,18 +209,20 @@ DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
                __entry->param0 = params->param0;
                __entry->param1 = params->param1;
                __entry->param2 = params->param2;
+               __entry->cmd_status = cmd_status;
        ),
-       TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x",
+       TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x --> status: %s",
                __get_str(name), dwc3_gadget_ep_cmd_string(__entry->cmd),
                __entry->cmd, __entry->param0,
-               __entry->param1, __entry->param2
+               __entry->param1, __entry->param2,
+               dwc3_ep_cmd_status_string(__entry->cmd_status)
        )
 );
 
 DEFINE_EVENT(dwc3_log_gadget_ep_cmd, dwc3_gadget_ep_cmd,
        TP_PROTO(struct dwc3_ep *dep, unsigned int cmd,
-               struct dwc3_gadget_ep_cmd_params *params),
-       TP_ARGS(dep, cmd, params)
+               struct dwc3_gadget_ep_cmd_params *params, int cmd_status),
+       TP_ARGS(dep, cmd, params, cmd_status)
 );
 
 DECLARE_EVENT_CLASS(dwc3_log_trb,
@@ -224,6 +231,8 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
        TP_STRUCT__entry(
                __dynamic_array(char, name, DWC3_MSG_MAX)
                __field(struct dwc3_trb *, trb)
+               __field(u32, allocated)
+               __field(u32, queued)
                __field(u32, bpl)
                __field(u32, bph)
                __field(u32, size)
@@ -232,14 +241,53 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
        TP_fast_assign(
                snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
                __entry->trb = trb;
+               __entry->allocated = dep->allocated_requests;
+               __entry->queued = dep->queued_requests;
                __entry->bpl = trb->bpl;
                __entry->bph = trb->bph;
                __entry->size = trb->size;
                __entry->ctrl = trb->ctrl;
        ),
-       TP_printk("%s: trb %p bph %08x bpl %08x size %08x ctrl %08x",
-               __get_str(name), __entry->trb, __entry->bph, __entry->bpl,
-               __entry->size, __entry->ctrl
+       TP_printk("%s: %d/%d trb %p buf %08x%08x size %d ctrl %08x (%c%c%c%c:%c%c:%s)",
+               __get_str(name), __entry->queued, __entry->allocated,
+               __entry->trb, __entry->bph, __entry->bpl,
+               __entry->size, __entry->ctrl,
+               __entry->ctrl & DWC3_TRB_CTRL_HWO ? 'H' : 'h',
+               __entry->ctrl & DWC3_TRB_CTRL_LST ? 'L' : 'l',
+               __entry->ctrl & DWC3_TRB_CTRL_CHN ? 'C' : 'c',
+               __entry->ctrl & DWC3_TRB_CTRL_CSP ? 'S' : 's',
+               __entry->ctrl & DWC3_TRB_CTRL_ISP_IMI ? 'S' : 's',
+               __entry->ctrl & DWC3_TRB_CTRL_IOC ? 'C' : 'c',
+               ({char *s;
+               switch (__entry->ctrl & 0x3f0) {
+               case DWC3_TRBCTL_NORMAL:
+                       s = "normal";
+                       break;
+               case DWC3_TRBCTL_CONTROL_SETUP:
+                       s = "setup";
+                       break;
+               case DWC3_TRBCTL_CONTROL_STATUS2:
+                       s = "status2";
+                       break;
+               case DWC3_TRBCTL_CONTROL_STATUS3:
+                       s = "status3";
+                       break;
+               case DWC3_TRBCTL_CONTROL_DATA:
+                       s = "data";
+                       break;
+               case DWC3_TRBCTL_ISOCHRONOUS_FIRST:
+                       s = "isoc-first";
+                       break;
+               case DWC3_TRBCTL_ISOCHRONOUS:
+                       s = "isoc";
+                       break;
+               case DWC3_TRBCTL_LINK_TRB:
+                       s = "link";
+                       break;
+               default:
+                       s = "UNKNOWN";
+                       break;
+               } s; })
        )
 );
 
index 8cfc3191be50aee8f9157ba95e84c49446d1ae55..12731e67d2c7a01a9a6198129b39d0d684007556 100644 (file)
@@ -13,7 +13,7 @@
 
 #include <linux/console.h>
 #include <linux/errno.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/pci_regs.h>
 #include <linux/pci_ids.h>
 #include <linux/usb/ch9.h>
@@ -1093,5 +1093,5 @@ static int __init kgdbdbgp_start_thread(void)
 
        return 0;
 }
-module_init(kgdbdbgp_start_thread);
+device_initcall(kgdbdbgp_start_thread);
 #endif /* CONFIG_KGDB */
index 2057add439f09b59b111a609dfcc061e499a8bbd..3c3f31ceece76b4412362a32b90d86e5e48004ed 100644 (file)
@@ -114,7 +114,7 @@ config USB_GADGET_VBUS_DRAW
 
 config USB_GADGET_STORAGE_NUM_BUFFERS
        int "Number of storage pipeline buffers"
-       range 2 32
+       range 2 256
        default 2
        help
           Usually 2 buffers are enough to establish a good buffering
index e6c0542a063bfa33ba090357ebb1e16df696430e..17a6077b89a49ed41e1abbf1c4ae5beb4c4edb86 100644 (file)
@@ -93,7 +93,7 @@ int usb_gadget_config_buf(
        *cp = *config;
 
        /* then interface/endpoint/class/vendor/... */
-       len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf,
+       len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8 *)buf,
                        length - USB_DT_CONFIG_SIZE, desc);
        if (len < 0)
                return len;
index cc33d26674080a1359ed7be97410bb640453b469..5c8429f23a892782febdb3c650efa8c168705227 100644 (file)
@@ -130,6 +130,12 @@ struct ffs_epfile {
 
        struct dentry                   *dentry;
 
+       /*
+        * Buffer for holding data from partial reads which may happen since
+        * we’re rounding user read requests to a multiple of a max packet size.
+        */
+       struct ffs_buffer               *read_buffer;   /* P: epfile->mutex */
+
        char                            name[5];
 
        unsigned char                   in;     /* P: ffs->eps_lock */
@@ -138,6 +144,12 @@ struct ffs_epfile {
        unsigned char                   _pad;
 };
 
+struct ffs_buffer {
+       size_t length;
+       char *data;
+       char storage[];
+};
+
 /*  ffs_io_data structure ***************************************************/
 
 struct ffs_io_data {
@@ -640,6 +652,49 @@ static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
        }
 }
 
+static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
+{
+       ssize_t ret = copy_to_iter(data, data_len, iter);
+       if (likely(ret == data_len))
+               return ret;
+
+       if (unlikely(iov_iter_count(iter)))
+               return -EFAULT;
+
+       /*
+        * Dear user space developer!
+        *
+        * TL;DR: To stop getting below error message in your kernel log, change
+        * user space code using functionfs to align read buffers to a max
+        * packet size.
+        *
+        * Some UDCs (e.g. dwc3) require request sizes to be a multiple of a max
+        * packet size.  When unaligned buffer is passed to functionfs, it
+        * internally uses a larger, aligned buffer so that such UDCs are happy.
+        *
+        * Unfortunately, this means that host may send more data than was
+        * requested in read(2) system call.  f_fs doesn’t know what to do with
+        * that excess data so it simply drops it.
+        *
+        * Was the buffer aligned in the first place, no such problem would
+        * happen.
+        *
+        * Data may be dropped only in AIO reads.  Synchronous reads are handled
+        * by splitting a request into multiple parts.  This splitting may still
+        * be a problem though so it’s likely best to align the buffer
+        * regardless of it being AIO or not.
+        *
+        * This only affects OUT endpoints, i.e. reading data with a read(2),
+        * aio_read(2) etc. system calls.  Writing data to an IN endpoint is not
+        * affected.
+        */
+       pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
+              "Align read buffer size to max packet size to avoid the problem.\n",
+              data_len, ret);
+
+       return ret;
+}
+
 static void ffs_user_copy_worker(struct work_struct *work)
 {
        struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
@@ -650,9 +705,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
 
        if (io_data->read && ret > 0) {
                use_mm(io_data->mm);
-               ret = copy_to_iter(io_data->buf, ret, &io_data->data);
-               if (ret != io_data->req->actual && iov_iter_count(&io_data->data))
-                       ret = -EFAULT;
+               ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
                unuse_mm(io_data->mm);
        }
 
@@ -680,6 +733,58 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
        schedule_work(&io_data->work);
 }
 
+/* Assumes epfile->mutex is held. */
+static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
+                                         struct iov_iter *iter)
+{
+       struct ffs_buffer *buf = epfile->read_buffer;
+       ssize_t ret;
+       if (!buf)
+               return 0;
+
+       ret = copy_to_iter(buf->data, buf->length, iter);
+       if (buf->length == ret) {
+               kfree(buf);
+               epfile->read_buffer = NULL;
+       } else if (unlikely(iov_iter_count(iter))) {
+               ret = -EFAULT;
+       } else {
+               buf->length -= ret;
+               buf->data += ret;
+       }
+       return ret;
+}
+
+/* Assumes epfile->mutex is held. */
+static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
+                                     void *data, int data_len,
+                                     struct iov_iter *iter)
+{
+       struct ffs_buffer *buf;
+
+       ssize_t ret = copy_to_iter(data, data_len, iter);
+       if (likely(data_len == ret))
+               return ret;
+
+       if (unlikely(iov_iter_count(iter)))
+               return -EFAULT;
+
+       /* See ffs_copy_to_iter for more context. */
+       pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
+               data_len, ret);
+
+       data_len -= ret;
+       buf = kmalloc(sizeof(*buf) + data_len, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       buf->length = data_len;
+       buf->data = buf->storage;
+       memcpy(buf->storage, data + ret, data_len);
+       epfile->read_buffer = buf;
+
+       return ret;
+}
+
 static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
 {
        struct ffs_epfile *epfile = file->private_data;
@@ -709,21 +814,40 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
        if (halt && epfile->isoc)
                return -EINVAL;
 
+       /* We will be using request and read_buffer */
+       ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
+       if (unlikely(ret))
+               goto error;
+
        /* Allocate & copy */
        if (!halt) {
+               struct usb_gadget *gadget;
+
+               /*
+                * Do we have buffered data from previous partial read?  Check
+                * that for synchronous case only because we do not have
+                * facility to 'wake up' a pending asynchronous read and push
+                * buffered data to it which we would need to make things behave
+                * consistently.
+                */
+               if (!io_data->aio && io_data->read) {
+                       ret = __ffs_epfile_read_buffered(epfile, &io_data->data);
+                       if (ret)
+                               goto error_mutex;
+               }
+
                /*
                 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
                 * before the waiting completes, so do not assign to 'gadget'
                 * earlier
                 */
-               struct usb_gadget *gadget = epfile->ffs->gadget;
-               size_t copied;
+               gadget = epfile->ffs->gadget;
 
                spin_lock_irq(&epfile->ffs->eps_lock);
                /* In the meantime, endpoint got disabled or changed. */
                if (epfile->ep != ep) {
-                       spin_unlock_irq(&epfile->ffs->eps_lock);
-                       return -ESHUTDOWN;
+                       ret = -ESHUTDOWN;
+                       goto error_lock;
                }
                data_len = iov_iter_count(&io_data->data);
                /*
@@ -735,22 +859,17 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                spin_unlock_irq(&epfile->ffs->eps_lock);
 
                data = kmalloc(data_len, GFP_KERNEL);
-               if (unlikely(!data))
-                       return -ENOMEM;
-               if (!io_data->read) {
-                       copied = copy_from_iter(data, data_len, &io_data->data);
-                       if (copied != data_len) {
-                               ret = -EFAULT;
-                               goto error;
-                       }
+               if (unlikely(!data)) {
+                       ret = -ENOMEM;
+                       goto error_mutex;
+               }
+               if (!io_data->read &&
+                   copy_from_iter(data, data_len, &io_data->data) != data_len) {
+                       ret = -EFAULT;
+                       goto error_mutex;
                }
        }
 
-       /* We will be using request */
-       ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
-       if (unlikely(ret))
-               goto error;
-
        spin_lock_irq(&epfile->ffs->eps_lock);
 
        if (epfile->ep != ep) {
@@ -803,18 +922,13 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                        interrupted = ep->status < 0;
                }
 
-               /*
-                * XXX We may end up silently droping data here.  Since data_len
-                * (i.e. req->length) may be bigger than len (after being
-                * rounded up to maxpacketsize), we may end up with more data
-                * then user space has space for.
-                */
-               ret = interrupted ? -EINTR : ep->status;
-               if (io_data->read && ret > 0) {
-                       ret = copy_to_iter(data, ret, &io_data->data);
-                       if (!ret)
-                               ret = -EFAULT;
-               }
+               if (interrupted)
+                       ret = -EINTR;
+               else if (io_data->read && ep->status > 0)
+                       ret = __ffs_epfile_read_data(epfile, data, ep->status,
+                                                    &io_data->data);
+               else
+                       ret = ep->status;
                goto error_mutex;
        } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_KERNEL))) {
                ret = -ENOMEM;
@@ -980,6 +1094,8 @@ ffs_epfile_release(struct inode *inode, struct file *file)
 
        ENTER();
 
+       kfree(epfile->read_buffer);
+       epfile->read_buffer = NULL;
        ffs_data_closed(epfile->ffs);
 
        return 0;
@@ -1605,19 +1721,24 @@ static void ffs_func_eps_disable(struct ffs_function *func)
        unsigned count            = func->ffs->eps_count;
        unsigned long flags;
 
-       spin_lock_irqsave(&func->ffs->eps_lock, flags);
        do {
+               if (epfile)
+                       mutex_lock(&epfile->mutex);
+               spin_lock_irqsave(&func->ffs->eps_lock, flags);
                /* pending requests get nuked */
                if (likely(ep->ep))
                        usb_ep_disable(ep->ep);
                ++ep;
+               spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 
                if (epfile) {
                        epfile->ep = NULL;
+                       kfree(epfile->read_buffer);
+                       epfile->read_buffer = NULL;
+                       mutex_unlock(&epfile->mutex);
                        ++epfile;
                }
        } while (--count);
-       spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 }
 
 static int ffs_func_eps_enable(struct ffs_function *func)
@@ -2227,8 +2348,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
 {
        u32 str_count, needed_count, lang_count;
        struct usb_gadget_strings **stringtabs, *t;
-       struct usb_string *strings, *s;
        const char *data = _data;
+       struct usb_string *s;
 
        ENTER();
 
@@ -2286,7 +2407,6 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
                stringtabs = vla_ptr(vlabuf, d, stringtabs);
                t = vla_ptr(vlabuf, d, stringtab);
                s = vla_ptr(vlabuf, d, strings);
-               strings = s;
        }
 
        /* For each language */
index 5c6d4d7ca6052553faaa3edbbba1f3109593b34f..2505117e88e8bad0f1ce98a49498731634ceaf88 100644 (file)
@@ -2655,18 +2655,6 @@ void fsg_common_put(struct fsg_common *common)
 }
 EXPORT_SYMBOL_GPL(fsg_common_put);
 
-/* check if fsg_num_buffers is within a valid range */
-static inline int fsg_num_buffers_validate(unsigned int fsg_num_buffers)
-{
-#define FSG_MAX_NUM_BUFFERS    32
-
-       if (fsg_num_buffers >= 2 && fsg_num_buffers <= FSG_MAX_NUM_BUFFERS)
-               return 0;
-       pr_err("fsg_num_buffers %u is out of range (%d to %d)\n",
-              fsg_num_buffers, 2, FSG_MAX_NUM_BUFFERS);
-       return -EINVAL;
-}
-
 static struct fsg_common *fsg_common_setup(struct fsg_common *common)
 {
        if (!common) {
@@ -2709,11 +2697,7 @@ static void _fsg_common_free_buffers(struct fsg_buffhd *buffhds, unsigned n)
 int fsg_common_set_num_buffers(struct fsg_common *common, unsigned int n)
 {
        struct fsg_buffhd *bh, *buffhds;
-       int i, rc;
-
-       rc = fsg_num_buffers_validate(n);
-       if (rc != 0)
-               return rc;
+       int i;
 
        buffhds = kcalloc(n, sizeof(*buffhds), GFP_KERNEL);
        if (!buffhds)
@@ -3401,10 +3385,6 @@ static ssize_t fsg_opts_num_buffers_store(struct config_item *item,
        if (ret)
                goto end;
 
-       ret = fsg_num_buffers_validate(num);
-       if (ret)
-               goto end;
-
        fsg_common_set_num_buffers(opts->common, num);
        ret = len;
 
index 3580f198df8b4d49a0f817aeaae3ef4e49a9a720..6ded6345cd09bde695c2cb83fb9a79a6a88de9af 100644 (file)
@@ -907,7 +907,6 @@ static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
 {
        struct gs_port  *port = tty->driver_data;
        unsigned long   flags;
-       int             status;
 
        pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
                        port->port_num, tty, count);
@@ -917,7 +916,7 @@ static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
                count = gs_buf_put(&port->port_write_buf, buf, count);
        /* treat count == 0 as flush_chars() */
        if (port->port_usb)
-               status = gs_start_tx(port);
+               gs_start_tx(port);
        spin_unlock_irqrestore(&port->port_lock, flags);
 
        return count;
index f85639ef8a8f66e49b9dbaff3a530fea4be3d9cc..6da7316f8e87d73d7edb8a8215ac886db381c9bc 100644 (file)
@@ -265,7 +265,7 @@ static void *functionfs_acquire_dev(struct ffs_dev *dev)
 {
        if (!try_module_get(THIS_MODULE))
                return ERR_PTR(-ENOENT);
-       
+
        return NULL;
 }
 
@@ -275,7 +275,7 @@ static void functionfs_release_dev(struct ffs_dev *dev)
 }
 
 /*
- * The caller of this function takes ffs_lock 
+ * The caller of this function takes ffs_lock
  */
 static int functionfs_ready_callback(struct ffs_data *ffs)
 {
@@ -294,12 +294,12 @@ static int functionfs_ready_callback(struct ffs_data *ffs)
                ++missing_funcs;
                gfs_registered = false;
        }
-       
+
        return ret;
 }
 
 /*
- * The caller of this function takes ffs_lock 
+ * The caller of this function takes ffs_lock
  */
 static void functionfs_closed_callback(struct ffs_data *ffs)
 {
@@ -347,17 +347,14 @@ static int gfs_bind(struct usb_composite_dev *cdev)
 
 #ifdef CONFIG_USB_FUNCTIONFS_RNDIS
        {
-               struct f_rndis_opts *rndis_opts;
-
                fi_rndis = usb_get_function_instance("rndis");
                if (IS_ERR(fi_rndis)) {
                        ret = PTR_ERR(fi_rndis);
                        goto error;
                }
-               rndis_opts = container_of(fi_rndis, struct f_rndis_opts,
-                                         func_inst);
 #ifndef CONFIG_USB_FUNCTIONFS_ETH
-               net = rndis_opts->net;
+               net = container_of(fi_rndis, struct f_rndis_opts,
+                                  func_inst)->net;
 #endif
        }
 #endif
index 7c289416f87dcfdc37fb4ff8cab6f58aa2c3b863..658b8da6091523c275fd225b16355fb40cdbd70e 100644 (file)
@@ -312,7 +312,7 @@ config USB_NET2272_DMA
          If unsure, say "N" here.  The driver works fine in PIO mode.
 
 config USB_NET2280
-       tristate "NetChip 228x / PLX USB338x"
+       tristate "NetChip NET228x / PLX USB3x8x"
        depends on PCI
        help
           NetChip 2280 / 2282 is a PCI based USB peripheral controller which
@@ -322,6 +322,8 @@ config USB_NET2280
           (for control transfers) and several endpoints with dedicated
           functions.
 
+          PLX 2380 is a PCIe version of the PLX 2280.
+
           PLX 3380 / 3382 is a PCIe based USB peripheral controller which
           supports full, high speed USB 2.0 and super speed USB 3.0
           data transfers.
index dfee534463198e30e787a7ed2d95e2f41b87a711..98e74ed9f555547d8d4b778bb7d2fc4d19e553f1 100644 (file)
@@ -1,3 +1,8 @@
+# define_trace.h needs to know how to find our header
+CFLAGS_trace.o                 := -I$(src)
+
+udc-core-y                     := core.o trace.o
+
 #
 # USB peripheral controller drivers
 #
index 39d70b4a89581620d329444af1af61367d8b40ea..ea03ca7ae29a22cd171584846891883e426da570 100644 (file)
@@ -2340,7 +2340,6 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
        struct udc_ep *ep;
        struct udc_request *req;
        struct udc_data_dma *td;
-       unsigned dma_done;
        unsigned len;
 
        ep = &dev->ep[ep_ix];
@@ -2385,13 +2384,8 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
                         */
                        if (use_dma_ppb_du) {
                                td = udc_get_last_dma_desc(req);
-                               if (td) {
-                                       dma_done =
-                                               AMD_GETBITS(td->status,
-                                               UDC_DMA_IN_STS_BS);
-                                       /* don't care DMA done */
+                               if (td)
                                        req->req.actual = req->req.length;
-                               }
                        } else {
                                /* assume all bytes transferred */
                                req->req.actual = req->req.length;
@@ -3417,4 +3411,3 @@ module_pci_driver(udc_pci_driver);
 MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
 MODULE_AUTHOR("Thomas Dahlmann");
 MODULE_LICENSE("GPL");
-
index 18569de06b0495762950fcc724f69d05f10047af..bb1f6c8f0f01ab492c6b5c0d1852c655d2cc26ee 100644 (file)
@@ -1920,6 +1920,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
 
        udc->errata = match->data;
        udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
+       if (IS_ERR(udc->pmc))
+               udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
        if (udc->errata && IS_ERR(udc->pmc))
                return ERR_CAST(udc->pmc);
 
index 6a4155c4bd862cd50a3c5bced0fadffcd48f9c15..4d5e9188beae4522b235cf22b6b2e877402742d5 100644 (file)
@@ -57,7 +57,6 @@ static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
                                        u32 param0, u32 param1, u32 param2)
 {
        u32 temp, cmd_status;
-       int reset_bdc = 0;
        int ret;
 
        temp = bdc_readl(bdc->regs, BDC_CMDSC);
@@ -94,7 +93,6 @@ static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
 
        case BDC_CMDS_INTL:
                dev_err(bdc->dev, "BDC Internal error\n");
-               reset_bdc = 1;
                ret = -ECONNRESET;
                break;
 
@@ -102,7 +100,6 @@ static int bdc_submit_cmd(struct bdc *bdc, u32 cmd_sc,
                dev_err(bdc->dev,
                        "command timedout waited for %dusec\n",
                        BDC_CMD_TIMEOUT);
-               reset_bdc = 1;
                ret = -ECONNRESET;
                break;
        default:
index d6199507f86140b15439463f97f234aa7955d6fe..ccaa74ab6c0ed5c8bc2a0c10b921745c8d2977a9 100644 (file)
@@ -81,7 +81,7 @@ static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
                        continue;
                }
                if (!bd_table->start_bd) {
-                       dev_dbg(bdc->dev, "bd dma pool not allocted\n");
+                       dev_dbg(bdc->dev, "bd dma pool not allocated\n");
                        continue;
                }
 
@@ -702,11 +702,9 @@ static int ep0_queue(struct bdc_ep *ep, struct bdc_req *req)
 /* Queue data stage */
 static int ep0_queue_data_stage(struct bdc *bdc)
 {
-       struct usb_request *ep0_usb_req;
        struct bdc_ep *ep;
 
        dev_dbg(bdc->dev, "%s\n", __func__);
-       ep0_usb_req = &bdc->ep0_req.usb_req;
        ep = bdc->bdc_ep_array[1];
        bdc->ep0_req.ep = ep;
        bdc->ep0_req.usb_req.complete = NULL;
@@ -1393,10 +1391,8 @@ static int ep0_set_sel(struct bdc *bdc,
 {
        struct bdc_ep   *ep;
        u16     wLength;
-       u16     wValue;
 
        dev_dbg(bdc->dev, "%s\n", __func__);
-       wValue = le16_to_cpu(setup_pkt->wValue);
        wLength = le16_to_cpu(setup_pkt->wLength);
        if (unlikely(wLength != 6)) {
                dev_err(bdc->dev, "%s Wrong wLength:%d\n", __func__, wLength);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
new file mode 100644 (file)
index 0000000..ff8685e
--- /dev/null
@@ -0,0 +1,1523 @@
+/**
+ * udc.c - Core UDC Framework
+ *
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2  of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb.h>
+
+#include "trace.h"
+
/**
 * struct usb_udc - describes one usb device controller
 * @driver: the gadget driver pointer. For use by the class code
 * @dev: the child device to the actual controller
 * @gadget: the gadget. For use by the class code
 * @list: for use by the udc class driver
 * @vbus: for udcs who care about vbus status, this value is real vbus status;
 * for udcs who do not care about vbus status, this value is always true
 *
 * This represents the internal data structure which is used by the UDC-class
 * to hold information about udc driver and gadget together.
 */
struct usb_udc {
	struct usb_gadget_driver	*driver;
	struct usb_gadget		*gadget;
	struct device			dev;
	struct list_head		list;
	bool				vbus;
};
+
+static struct class *udc_class;
+static LIST_HEAD(udc_list);
+static LIST_HEAD(gadget_driver_pending_list);
+static DEFINE_MUTEX(udc_lock);
+
+static int udc_bind_to_driver(struct usb_udc *udc,
+               struct usb_gadget_driver *driver);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint
+ * @ep:the endpoint being configured
+ * @maxpacket_limit:value of maximum packet size limit
+ *
+ * This function should be used only in UDC drivers to initialize endpoint
+ * (usually in probe function).
+ */
+void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
+                                             unsigned maxpacket_limit)
+{
+       ep->maxpacket_limit = maxpacket_limit;
+       ep->maxpacket = maxpacket_limit;
+
+       trace_usb_ep_set_maxpacket_limit(ep, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_maxpacket_limit);
+
+/**
+ * usb_ep_enable - configure endpoint, making it usable
+ * @ep:the endpoint being configured.  may not be the endpoint named "ep0".
+ *     drivers discover endpoints through the ep_list of a usb_gadget.
+ *
+ * When configurations are set, or when interface settings change, the driver
+ * will enable or disable the relevant endpoints.  while it is enabled, an
+ * endpoint may be used for i/o until the driver receives a disconnect() from
+ * the host or until the endpoint is disabled.
+ *
+ * the ep0 implementation (which calls this routine) must ensure that the
+ * hardware capabilities of each endpoint match the descriptor provided
+ * for it.  for example, an endpoint named "ep2in-bulk" would be usable
+ * for interrupt transfers as well as bulk, but it likely couldn't be used
+ * for iso transfers or for endpoint 14.  some endpoints are fully
+ * configurable, with more generic names like "ep-a".  (remember that for
+ * USB, "in" means "towards the USB master".)
+ *
+ * returns zero, or a negative error code.
+ */
+int usb_ep_enable(struct usb_ep *ep)
+{
+       int ret = 0;
+
+       if (ep->enabled)
+               goto out;
+
+       ret = ep->ops->enable(ep, ep->desc);
+       if (ret) {
+               ret = ret;
+               goto out;
+       }
+
+       ep->enabled = true;
+
+out:
+       trace_usb_ep_enable(ep, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_enable);
+
+/**
+ * usb_ep_disable - endpoint is no longer usable
+ * @ep:the endpoint being unconfigured.  may not be the endpoint named "ep0".
+ *
+ * no other task may be using this endpoint when this is called.
+ * any pending and uncompleted requests will complete with status
+ * indicating disconnect (-ESHUTDOWN) before this call returns.
+ * gadget drivers must call usb_ep_enable() again before queueing
+ * requests to the endpoint.
+ *
+ * returns zero, or a negative error code.
+ */
+int usb_ep_disable(struct usb_ep *ep)
+{
+       int ret = 0;
+
+       if (!ep->enabled)
+               goto out;
+
+       ret = ep->ops->disable(ep);
+       if (ret) {
+               ret = ret;
+               goto out;
+       }
+
+       ep->enabled = false;
+
+out:
+       trace_usb_ep_disable(ep, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_disable);
+
+/**
+ * usb_ep_alloc_request - allocate a request object to use with this endpoint
+ * @ep:the endpoint to be used with the request
+ * @gfp_flags:GFP_* flags to use
+ *
+ * Request objects must be allocated with this call, since they normally
+ * need controller-specific setup and may even need endpoint-specific
+ * resources such as allocation of DMA descriptors.
+ * Requests may be submitted with usb_ep_queue(), and receive a single
+ * completion callback.  Free requests with usb_ep_free_request(), when
+ * they are no longer needed.
+ *
+ * Returns the request, or null if one could not be allocated.
+ */
+struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
+                                                      gfp_t gfp_flags)
+{
+       struct usb_request *req = NULL;
+
+       req = ep->ops->alloc_request(ep, gfp_flags);
+
+       trace_usb_ep_alloc_request(ep, req, req ? 0 : -ENOMEM);
+
+       return req;
+}
+EXPORT_SYMBOL_GPL(usb_ep_alloc_request);
+
+/**
+ * usb_ep_free_request - frees a request object
+ * @ep:the endpoint associated with the request
+ * @req:the request being freed
+ *
+ * Reverses the effect of usb_ep_alloc_request().
+ * Caller guarantees the request is not queued, and that it will
+ * no longer be requeued (or otherwise used).
+ */
+void usb_ep_free_request(struct usb_ep *ep,
+                                      struct usb_request *req)
+{
+       ep->ops->free_request(ep, req);
+       trace_usb_ep_free_request(ep, req, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_free_request);
+
+/**
+ * usb_ep_queue - queues (submits) an I/O request to an endpoint.
+ * @ep:the endpoint associated with the request
+ * @req:the request being submitted
+ * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
+ *     pre-allocate all necessary memory with the request.
+ *
+ * This tells the device controller to perform the specified request through
+ * that endpoint (reading or writing a buffer).  When the request completes,
+ * including being canceled by usb_ep_dequeue(), the request's completion
+ * routine is called to return the request to the driver.  Any endpoint
+ * (except control endpoints like ep0) may have more than one transfer
+ * request queued; they complete in FIFO order.  Once a gadget driver
+ * submits a request, that request may not be examined or modified until it
+ * is given back to that driver through the completion callback.
+ *
+ * Each request is turned into one or more packets.  The controller driver
+ * never merges adjacent requests into the same packet.  OUT transfers
+ * will sometimes use data that's already buffered in the hardware.
+ * Drivers can rely on the fact that the first byte of the request's buffer
+ * always corresponds to the first byte of some USB packet, for both
+ * IN and OUT transfers.
+ *
+ * Bulk endpoints can queue any amount of data; the transfer is packetized
+ * automatically.  The last packet will be short if the request doesn't fill it
+ * out completely.  Zero length packets (ZLPs) should be avoided in portable
+ * protocols since not all usb hardware can successfully handle zero length
+ * packets.  (ZLPs may be explicitly written, and may be implicitly written if
+ * the request 'zero' flag is set.)  Bulk endpoints may also be used
+ * for interrupt transfers; but the reverse is not true, and some endpoints
+ * won't support every interrupt transfer.  (Such as 768 byte packets.)
+ *
+ * Interrupt-only endpoints are less functional than bulk endpoints, for
+ * example by not supporting queueing or not handling buffers that are
+ * larger than the endpoint's maxpacket size.  They may also treat data
+ * toggle differently.
+ *
+ * Control endpoints ... after getting a setup() callback, the driver queues
+ * one response (even if it would be zero length).  That enables the
+ * status ack, after transferring data as specified in the response.  Setup
+ * functions may return negative error codes to generate protocol stalls.
+ * (Note that some USB device controllers disallow protocol stall responses
+ * in some cases.)  When control responses are deferred (the response is
+ * written after the setup callback returns), then usb_ep_set_halt() may be
+ * used on ep0 to trigger protocol stalls.  Depending on the controller,
+ * it may not be possible to trigger a status-stage protocol stall when the
+ * data stage is over, that is, from within the response's completion
+ * routine.
+ *
+ * For periodic endpoints, like interrupt or isochronous ones, the usb host
+ * arranges to poll once per interval, and the gadget driver usually will
+ * have queued some data to transfer at that time.
+ *
+ * Returns zero, or a negative error code.  Endpoints that are not enabled
+ * report errors; errors will also be
+ * reported when the usb peripheral is disconnected.
+ */
+int usb_ep_queue(struct usb_ep *ep,
+                              struct usb_request *req, gfp_t gfp_flags)
+{
+       int ret = 0;
+
+       if (WARN_ON_ONCE(!ep->enabled && ep->address)) {
+               ret = -ESHUTDOWN;
+               goto out;
+       }
+
+       ret = ep->ops->queue(ep, req, gfp_flags);
+
+out:
+       trace_usb_ep_queue(ep, req, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_queue);
+
+/**
+ * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
+ * @ep:the endpoint associated with the request
+ * @req:the request being canceled
+ *
+ * If the request is still active on the endpoint, it is dequeued and its
+ * completion routine is called (with status -ECONNRESET); else a negative
+ * error code is returned. This is guaranteed to happen before the call to
+ * usb_ep_dequeue() returns.
+ *
+ * Note that some hardware can't clear out write fifos (to unlink the request
+ * at the head of the queue) except as part of disconnecting from usb. Such
+ * restrictions prevent drivers from supporting configuration changes,
+ * even to configuration zero (a "chapter 9" requirement).
+ */
+int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
+{
+       int ret;
+
+       ret = ep->ops->dequeue(ep, req);
+       trace_usb_ep_dequeue(ep, req, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_dequeue);
+
+/**
+ * usb_ep_set_halt - sets the endpoint halt feature.
+ * @ep: the non-isochronous endpoint being stalled
+ *
+ * Use this to stall an endpoint, perhaps as an error report.
+ * Except for control endpoints,
+ * the endpoint stays halted (will not stream any data) until the host
+ * clears this feature; drivers may need to empty the endpoint's request
+ * queue first, to make sure no inappropriate transfers happen.
+ *
+ * Note that while an endpoint CLEAR_FEATURE will be invisible to the
+ * gadget driver, a SET_INTERFACE will not be.  To reset endpoints for the
+ * current altsetting, see usb_ep_clear_halt().  When switching altsettings,
+ * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
+ *
+ * Returns zero, or a negative error code.  On success, this call sets
+ * underlying hardware state that blocks data transfers.
+ * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
+ * transfer requests are still queued, or if the controller hardware
+ * (usually a FIFO) still holds bytes that the host hasn't collected.
+ */
+int usb_ep_set_halt(struct usb_ep *ep)
+{
+       int ret;
+
+       ret = ep->ops->set_halt(ep, 1);
+       trace_usb_ep_set_halt(ep, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_halt);
+
+/**
+ * usb_ep_clear_halt - clears endpoint halt, and resets toggle
+ * @ep:the bulk or interrupt endpoint being reset
+ *
+ * Use this when responding to the standard usb "set interface" request,
+ * for endpoints that aren't reconfigured, after clearing any other state
+ * in the endpoint's i/o queue.
+ *
+ * Returns zero, or a negative error code.  On success, this call clears
+ * the underlying hardware state reflecting endpoint halt and data toggle.
+ * Note that some hardware can't support this request (like pxa2xx_udc),
+ * and accordingly can't correctly implement interface altsettings.
+ */
+int usb_ep_clear_halt(struct usb_ep *ep)
+{
+       int ret;
+
+       ret = ep->ops->set_halt(ep, 0);
+       trace_usb_ep_clear_halt(ep, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_clear_halt);
+
+/**
+ * usb_ep_set_wedge - sets the halt feature and ignores clear requests
+ * @ep: the endpoint being wedged
+ *
+ * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
+ * requests. If the gadget driver clears the halt status, it will
+ * automatically unwedge the endpoint.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_ep_set_wedge(struct usb_ep *ep)
+{
+       int ret;
+
+       if (ep->ops->set_wedge)
+               ret = ep->ops->set_wedge(ep);
+       else
+               ret = ep->ops->set_halt(ep, 1);
+
+       trace_usb_ep_set_wedge(ep, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_set_wedge);
+
+/**
+ * usb_ep_fifo_status - returns number of bytes in fifo, or error
+ * @ep: the endpoint whose fifo status is being checked.
+ *
+ * FIFO endpoints may have "unclaimed data" in them in certain cases,
+ * such as after aborted transfers.  Hosts may not have collected all
+ * the IN data written by the gadget driver (and reported by a request
+ * completion).  The gadget driver may not have collected all the data
+ * written OUT to it by the host.  Drivers that need precise handling for
+ * fault reporting or recovery may need to use this call.
+ *
+ * This returns the number of such bytes in the fifo, or a negative
+ * errno if the endpoint doesn't use a FIFO or doesn't support such
+ * precise handling.
+ */
+int usb_ep_fifo_status(struct usb_ep *ep)
+{
+       int ret;
+
+       if (ep->ops->fifo_status)
+               ret = ep->ops->fifo_status(ep);
+       else
+               ret = -EOPNOTSUPP;
+
+       trace_usb_ep_fifo_status(ep, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_ep_fifo_status);
+
+/**
+ * usb_ep_fifo_flush - flushes contents of a fifo
+ * @ep: the endpoint whose fifo is being flushed.
+ *
+ * This call may be used to flush the "unclaimed data" that may exist in
+ * an endpoint fifo after abnormal transaction terminations.  The call
+ * must never be used except when endpoint is not being used for any
+ * protocol translation.
+ */
+void usb_ep_fifo_flush(struct usb_ep *ep)
+{
+       if (ep->ops->fifo_flush)
+               ep->ops->fifo_flush(ep);
+
+       trace_usb_ep_fifo_flush(ep, 0);
+}
+EXPORT_SYMBOL_GPL(usb_ep_fifo_flush);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_gadget_frame_number - returns the current frame number
+ * @gadget: controller that reports the frame number
+ *
+ * Returns the usb frame number, normally eleven bits from a SOF packet,
+ * or negative errno if this device doesn't support this capability.
+ */
+int usb_gadget_frame_number(struct usb_gadget *gadget)
+{
+       int ret;
+
+       ret = gadget->ops->get_frame(gadget);
+
+       trace_usb_gadget_frame_number(gadget, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_frame_number);
+
+/**
+ * usb_gadget_wakeup - tries to wake up the host connected to this gadget
+ * @gadget: controller used to wake up the host
+ *
+ * Returns zero on success, else negative error code if the hardware
+ * doesn't support such attempts, or its support has not been enabled
+ * by the usb host.  Drivers must return device descriptors that report
+ * their ability to support this, or hosts won't enable it.
+ *
+ * This may also try to use SRP to wake the host and start enumeration,
+ * even if OTG isn't otherwise in use.  OTG devices may also start
+ * remote wakeup even when hosts don't explicitly enable it.
+ */
+int usb_gadget_wakeup(struct usb_gadget *gadget)
+{
+       int ret = 0;
+
+       if (!gadget->ops->wakeup) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
+       ret = gadget->ops->wakeup(gadget);
+
+out:
+       trace_usb_gadget_wakeup(gadget, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_wakeup);
+
+/**
+ * usb_gadget_set_selfpowered - sets the device selfpowered feature.
+ * @gadget:the device being declared as self-powered
+ *
+ * this affects the device status reported by the hardware driver
+ * to reflect that it now has a local power supply.
+ *
+ * returns zero on success, else negative errno.
+ */
+int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
+{
+       int ret = 0;
+
+       if (!gadget->ops->set_selfpowered) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
+       ret = gadget->ops->set_selfpowered(gadget, 1);
+
+out:
+       trace_usb_gadget_set_selfpowered(gadget, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_set_selfpowered);
+
+/**
+ * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
+ * @gadget:the device being declared as bus-powered
+ *
+ * this affects the device status reported by the hardware driver.
+ * some hardware may not support bus-powered operation, in which
+ * case this feature's value can never change.
+ *
+ * returns zero on success, else negative errno.
+ */
+int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
+{
+       int ret = 0;
+
+       if (!gadget->ops->set_selfpowered) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
+       ret = gadget->ops->set_selfpowered(gadget, 0);
+
+out:
+       trace_usb_gadget_clear_selfpowered(gadget, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_clear_selfpowered);
+
+/**
+ * usb_gadget_vbus_connect - Notify controller that VBUS is powered
+ * @gadget:The device which now has VBUS power.
+ * Context: can sleep
+ *
+ * This call is used by a driver for an external transceiver (or GPIO)
+ * that detects a VBUS power session starting.  Common responses include
+ * resuming the controller, activating the D+ (or D-) pullup to let the
+ * host detect that a USB device is attached, and starting to draw power
+ * (8mA or possibly more, especially after SET_CONFIGURATION).
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_connect(struct usb_gadget *gadget)
+{
+       int ret = 0;
+
+       if (!gadget->ops->vbus_session) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
+       ret = gadget->ops->vbus_session(gadget, 1);
+
+out:
+       trace_usb_gadget_vbus_connect(gadget, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_connect);
+
+/**
+ * usb_gadget_vbus_draw - constrain controller's VBUS power usage
+ * @gadget:The device whose VBUS usage is being described
+ * @mA:How much current to draw, in milliAmperes.  This should be twice
+ *     the value listed in the configuration descriptor bMaxPower field.
+ *
+ * This call is used by gadget drivers during SET_CONFIGURATION calls,
+ * reporting how much power the device may consume.  For example, this
+ * could affect how quickly batteries are recharged.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
+{
+       int ret = 0;
+
+       if (!gadget->ops->vbus_draw) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
+       ret = gadget->ops->vbus_draw(gadget, mA);
+       if (!ret)
+               gadget->mA = mA;
+
+out:
+       trace_usb_gadget_vbus_draw(gadget, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_draw);
+
+/**
+ * usb_gadget_vbus_disconnect - notify controller about VBUS session end
+ * @gadget:the device whose VBUS supply is being described
+ * Context: can sleep
+ *
+ * This call is used by a driver for an external transceiver (or GPIO)
+ * that detects a VBUS power session ending.  Common responses include
+ * reversing everything done in usb_gadget_vbus_connect().
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
+{
+       int ret = 0;
+
+       if (!gadget->ops->vbus_session) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
+       ret = gadget->ops->vbus_session(gadget, 0);
+
+out:
+       trace_usb_gadget_vbus_disconnect(gadget, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
+
+/**
+ * usb_gadget_connect - software-controlled connect to USB host
+ * @gadget:the peripheral being connected
+ *
+ * Enables the D+ (or potentially D-) pullup.  The host will start
+ * enumerating this gadget when the pullup is active and a VBUS session
+ * is active (the link is powered).  This pullup is always enabled unless
+ * usb_gadget_disconnect() has been used to disable it.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_connect(struct usb_gadget *gadget)
+{
+       int ret = 0;
+
+       if (!gadget->ops->pullup) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
+       if (gadget->deactivated) {
+               /*
+                * If gadget is deactivated we only save new state.
+                * Gadget will be connected automatically after activation.
+                */
+               gadget->connected = true;
+               goto out;
+       }
+
+       ret = gadget->ops->pullup(gadget, 1);
+       if (!ret)
+               gadget->connected = 1;
+
+out:
+       trace_usb_gadget_connect(gadget, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_connect);
+
+/**
+ * usb_gadget_disconnect - software-controlled disconnect from USB host
+ * @gadget:the peripheral being disconnected
+ *
+ * Disables the D+ (or potentially D-) pullup, which the host may see
+ * as a disconnect (when a VBUS session is active).  Not all systems
+ * support software pullup controls.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_disconnect(struct usb_gadget *gadget)
+{
+       int ret = 0;
+
+       if (!gadget->ops->pullup) {
+               ret = -EOPNOTSUPP;
+               goto out;
+       }
+
+       if (gadget->deactivated) {
+               /*
+                * If gadget is deactivated we only save new state.
+                * Gadget will stay disconnected after activation.
+                */
+               gadget->connected = false;
+               goto out;
+       }
+
+       ret = gadget->ops->pullup(gadget, 0);
+       if (!ret)
+               gadget->connected = 0;
+
+out:
+       trace_usb_gadget_disconnect(gadget, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
+
+/**
+ * usb_gadget_deactivate - deactivate function which is not ready to work
+ * @gadget: the peripheral being deactivated
+ *
+ * This routine may be used during the gadget driver bind() call to prevent
+ * the peripheral from ever being visible to the USB host, unless later
+ * usb_gadget_activate() is called.  For example, user mode components may
+ * need to be activated before the system can talk to hosts.
+ *
+ * Returns zero on success, else negative errno.
+ */
int usb_gadget_deactivate(struct usb_gadget *gadget)
{
	int ret = 0;

	/* Already deactivated: nothing to do, report success. */
	if (gadget->deactivated)
		goto out;

	if (gadget->connected) {
		ret = usb_gadget_disconnect(gadget);
		if (ret)
			goto out;

		/*
		 * If gadget was being connected before deactivation, we want
		 * to reconnect it in usb_gadget_activate().
		 */
		gadget->connected = true;
	}
	gadget->deactivated = true;

out:
	trace_usb_gadget_deactivate(gadget, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_deactivate);
+
+/**
+ * usb_gadget_activate - activate function which is not ready to work
+ * @gadget: the peripheral being activated
+ *
+ * This routine activates gadget which was previously deactivated with
+ * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_activate(struct usb_gadget *gadget)
+{
+       int ret = 0;
+
+       if (!gadget->deactivated)
+               goto out;
+
+       gadget->deactivated = false;
+
+       /*
+        * If gadget has been connected before deactivation, or became connected
+        * while it was being deactivated, we call usb_gadget_connect().
+        */
+       if (gadget->connected)
+               ret = usb_gadget_connect(gadget);
+
+out:
+       trace_usb_gadget_activate(gadget, ret);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_activate);
+
+/* ------------------------------------------------------------------------- */
+
+#ifdef CONFIG_HAS_DMA
+
/* DMA-map a request's buffer (or SG list) against @dev for the given
 * direction; returns 0 or -EFAULT on mapping failure.
 */
int usb_gadget_map_request_by_dev(struct device *dev,
		struct usb_request *req, int is_in)
{
	/* Zero-length requests carry no data, so there is nothing to map. */
	if (req->length == 0)
		return 0;

	if (req->num_sgs) {
		int	mapped;

		mapped = dma_map_sg(dev, req->sg, req->num_sgs,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (mapped == 0) {
			dev_err(dev, "failed to map SGs\n");
			return -EFAULT;
		}

		/* Remember how many entries must be unmapped later. */
		req->num_mapped_sgs = mapped;
	} else {
		req->dma = dma_map_single(dev, req->buf, req->length,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, req->dma)) {
			dev_err(dev, "failed to map buffer\n");
			return -EFAULT;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev);
+
+int usb_gadget_map_request(struct usb_gadget *gadget,
+               struct usb_request *req, int is_in)
+{
+       return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_map_request);
+
/* Undo usb_gadget_map_request_by_dev() for the same @dev and direction. */
void usb_gadget_unmap_request_by_dev(struct device *dev,
		struct usb_request *req, int is_in)
{
	/* Nothing was mapped for a zero-length request. */
	if (req->length == 0)
		return;

	if (req->num_mapped_sgs) {
		dma_unmap_sg(dev, req->sg, req->num_mapped_sgs,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		/* Mark the SG list as no longer mapped. */
		req->num_mapped_sgs = 0;
	} else {
		dma_unmap_single(dev, req->dma, req->length,
				is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev);
+
+void usb_gadget_unmap_request(struct usb_gadget *gadget,
+               struct usb_request *req, int is_in)
+{
+       usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
+
+#endif /* CONFIG_HAS_DMA */
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * usb_gadget_giveback_request - give the request back to the gadget layer
+ * Context: in_interrupt()
+ *
+ * This is called by device controller drivers in order to return the
+ * completed request back to the gadget layer.
+ */
+void usb_gadget_giveback_request(struct usb_ep *ep,
+               struct usb_request *req)
+{
+       if (likely(req->status == 0))
+               usb_led_activity(USB_LED_EVENT_GADGET);
+
+       trace_usb_gadget_giveback_request(ep, req, 0);
+
+       req->complete(ep, req);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_giveback_request);
+
+/* ------------------------------------------------------------------------- */
+
+/**
+ * gadget_find_ep_by_name - returns ep whose name is the same as string passed
+ *     in second parameter, or NULL if the searched endpoint is not found
+ * @g: controller to check for quirk
+ * @name: name of searched endpoint
+ */
+struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name)
+{
+       struct usb_ep *ep;
+
+       gadget_for_each_ep(ep, g) {
+               if (!strcmp(ep->name, name))
+                       return ep;
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(gadget_find_ep_by_name);
+
+/* ------------------------------------------------------------------------- */
+
+int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
+               struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
+               struct usb_ss_ep_comp_descriptor *ep_comp)
+{
+       u8              type;
+       u16             max;
+       int             num_req_streams = 0;
+
+       /* endpoint already claimed? */
+       if (ep->claimed)
+               return 0;
+
+       type = usb_endpoint_type(desc);
+       max = 0x7ff & usb_endpoint_maxp(desc);
+
+       if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
+               return 0;
+       if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
+               return 0;
+
+       if (max > ep->maxpacket_limit)
+               return 0;
+
+       /* "high bandwidth" works only at high speed */
+       if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
+               return 0;
+
+       switch (type) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               /* only support ep0 for portable CONTROL traffic */
+               return 0;
+       case USB_ENDPOINT_XFER_ISOC:
+               if (!ep->caps.type_iso)
+                       return 0;
+               /* ISO:  limit 1023 bytes full speed, 1024 high/super speed */
+               if (!gadget_is_dualspeed(gadget) && max > 1023)
+                       return 0;
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+               if (!ep->caps.type_bulk)
+                       return 0;
+               if (ep_comp && gadget_is_superspeed(gadget)) {
+                       /* Get the number of required streams from the
+                        * EP companion descriptor and see if the EP
+                        * matches it
+                        */
+                       num_req_streams = ep_comp->bmAttributes & 0x1f;
+                       if (num_req_streams > ep->max_streams)
+                               return 0;
+               }
+               break;
+       case USB_ENDPOINT_XFER_INT:
+               /* Bulk endpoints handle interrupt transfers,
+                * except the toggle-quirky iso-synch kind
+                */
+               if (!ep->caps.type_int && !ep->caps.type_bulk)
+                       return 0;
+               /* INT:  limit 64 bytes full speed, 1024 high/super speed */
+               if (!gadget_is_dualspeed(gadget) && max > 64)
+                       return 0;
+               break;
+       }
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
+
+/* ------------------------------------------------------------------------- */
+
+static void usb_gadget_state_work(struct work_struct *work)
+{
+       struct usb_gadget *gadget = work_to_gadget(work);
+       struct usb_udc *udc = gadget->udc;
+
+       if (udc)
+               sysfs_notify(&udc->dev.kobj, NULL, "state");
+}
+
+void usb_gadget_set_state(struct usb_gadget *gadget,
+               enum usb_device_state state)
+{
+       gadget->state = state;
+       schedule_work(&gadget->work);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_set_state);
+
+/* ------------------------------------------------------------------------- */
+
+static void usb_udc_connect_control(struct usb_udc *udc)
+{
+       if (udc->vbus)
+               usb_gadget_connect(udc->gadget);
+       else
+               usb_gadget_disconnect(udc->gadget);
+}
+
+/**
+ * usb_udc_vbus_handler - updates the udc core vbus status, and tries to
+ * connect or disconnect gadget
+ * @gadget: The gadget which vbus change occurs
+ * @status: The vbus status
+ *
+ * The udc driver calls it when it wants to connect or disconnect gadget
+ * according to vbus status.
+ */
+void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
+{
+       struct usb_udc *udc = gadget->udc;
+
+       if (udc) {
+               udc->vbus = status;
+               usb_udc_connect_control(udc);
+       }
+}
+EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
+
+/**
+ * usb_gadget_udc_reset - notifies the udc core that bus reset occurs
+ * @gadget: The gadget which bus reset occurs
+ * @driver: The gadget driver we want to notify
+ *
+ * If the udc driver has bus reset handler, it needs to call this when the bus
+ * reset occurs, it notifies the gadget driver that the bus reset occurs as
+ * well as updates gadget state.
+ */
+void usb_gadget_udc_reset(struct usb_gadget *gadget,
+               struct usb_gadget_driver *driver)
+{
+       driver->reset(gadget);
+       usb_gadget_set_state(gadget, USB_STATE_DEFAULT);
+}
+EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
+
+/**
+ * usb_gadget_udc_start - tells usb device controller to start up
+ * @udc: The UDC to be started
+ *
+ * This call is issued by the UDC Class driver when it's about
+ * to register a gadget driver to the device controller, before
+ * calling gadget driver's bind() method.
+ *
+ * It allows the controller to be powered off until strictly
+ * necessary to have it powered on.
+ *
+ * Returns zero on success, else negative errno.
+ */
+static inline int usb_gadget_udc_start(struct usb_udc *udc)
+{
+       return udc->gadget->ops->udc_start(udc->gadget, udc->driver);
+}
+
+/**
+ * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
+ * @gadget: The device we want to stop activity
+ * @driver: The driver to unbind from @gadget
+ *
+ * This call is issued by the UDC Class driver after calling
+ * gadget driver's unbind() method.
+ *
+ * The details are implementation specific, but it can go as
+ * far as powering off UDC completely and disable its data
+ * line pullups.
+ */
+static inline void usb_gadget_udc_stop(struct usb_udc *udc)
+{
+       udc->gadget->ops->udc_stop(udc->gadget);
+}
+
+/**
+ * usb_udc_release - release the usb_udc struct
+ * @dev: the dev member within usb_udc
+ *
+ * This is called by the driver core in order to free memory once the last
+ * reference is released.
+ */
+static void usb_udc_release(struct device *dev)
+{
+       struct usb_udc *udc;
+
+       udc = container_of(dev, struct usb_udc, dev);
+       dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
+       kfree(udc);
+}
+
+static const struct attribute_group *usb_udc_attr_groups[];
+
+static void usb_udc_nop_release(struct device *dev)
+{
+       dev_vdbg(dev, "%s\n", __func__);
+}
+
+/**
+ * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller driver's
+ * device.
+ * @gadget: the gadget to be added to the list.
+ * @release: a gadget release function.
+ *
+ * Returns zero on success, negative errno otherwise.
+ */
+int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
+               void (*release)(struct device *dev))
+{
+       struct usb_udc          *udc;
+       struct usb_gadget_driver *driver;
+       int                     ret = -ENOMEM;
+
+       udc = kzalloc(sizeof(*udc), GFP_KERNEL);
+       if (!udc)
+               goto err1;
+
+       dev_set_name(&gadget->dev, "gadget");
+       INIT_WORK(&gadget->work, usb_gadget_state_work);
+       gadget->dev.parent = parent;
+
+       if (release)
+               gadget->dev.release = release;
+       else
+               gadget->dev.release = usb_udc_nop_release;
+
+       ret = device_register(&gadget->dev);
+       if (ret)
+               goto err2;
+
+       device_initialize(&udc->dev);
+       udc->dev.release = usb_udc_release;
+       udc->dev.class = udc_class;
+       udc->dev.groups = usb_udc_attr_groups;
+       udc->dev.parent = parent;
+       ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
+       if (ret)
+               goto err3;
+
+       udc->gadget = gadget;
+       gadget->udc = udc;
+
+       mutex_lock(&udc_lock);
+       list_add_tail(&udc->list, &udc_list);
+
+       ret = device_add(&udc->dev);
+       if (ret)
+               goto err4;
+
+       usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
+       udc->vbus = true;
+
+       /* pick up one of pending gadget drivers */
+       list_for_each_entry(driver, &gadget_driver_pending_list, pending) {
+               if (!driver->udc_name || strcmp(driver->udc_name,
+                                               dev_name(&udc->dev)) == 0) {
+                       ret = udc_bind_to_driver(udc, driver);
+                       if (ret != -EPROBE_DEFER)
+                               list_del(&driver->pending);
+                       if (ret)
+                               goto err4;
+                       break;
+               }
+       }
+
+       mutex_unlock(&udc_lock);
+
+       return 0;
+
+err4:
+       list_del(&udc->list);
+       mutex_unlock(&udc_lock);
+
+err3:
+       put_device(&udc->dev);
+       device_del(&gadget->dev);
+
+err2:
+       put_device(&gadget->dev);
+       kfree(udc);
+
+err1:
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);
+
+/**
+ * usb_get_gadget_udc_name - get the name of the first UDC controller
+ * This function returns the name of the first UDC controller in the system.
+ * Please note that this interface is useful only for legacy drivers which
+ * assume that there is only one UDC controller in the system and they need to
+ * get its name before initialization. There is no guarantee that the UDC
+ * of the returned name will be still available, when gadget driver registers
+ * itself.
+ *
+ * Returns pointer to string with UDC controller name on success, NULL
+ * otherwise. Caller should kfree() returned string.
+ */
+char *usb_get_gadget_udc_name(void)
+{
+       struct usb_udc *udc;
+       char *name = NULL;
+
+       /* For now we take the first available UDC */
+       mutex_lock(&udc_lock);
+       list_for_each_entry(udc, &udc_list, list) {
+               if (!udc->driver) {
+                       name = kstrdup(udc->gadget->name, GFP_KERNEL);
+                       break;
+               }
+       }
+       mutex_unlock(&udc_lock);
+       return name;
+}
+EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name);
+
+/**
+ * usb_add_gadget_udc - adds a new gadget to the udc class driver list
+ * @parent: the parent device to this udc. Usually the controller
+ * driver's device.
+ * @gadget: the gadget to be added to the list
+ *
+ * Returns zero on success, negative errno otherwise.
+ */
+int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
+{
+       return usb_add_gadget_udc_release(parent, gadget, NULL);
+}
+EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
+
+static void usb_gadget_remove_driver(struct usb_udc *udc)
+{
+       dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
+                       udc->driver->function);
+
+       kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+
+       usb_gadget_disconnect(udc->gadget);
+       udc->driver->disconnect(udc->gadget);
+       udc->driver->unbind(udc->gadget);
+       usb_gadget_udc_stop(udc);
+
+       udc->driver = NULL;
+       udc->dev.driver = NULL;
+       udc->gadget->dev.driver = NULL;
+}
+
+/**
+ * usb_del_gadget_udc - deletes @udc from udc_list
+ * @gadget: the gadget to be removed.
+ *
+ * This will call usb_gadget_unregister_driver() if
+ * the @udc is still busy.
+ */
+void usb_del_gadget_udc(struct usb_gadget *gadget)
+{
+       struct usb_udc *udc = gadget->udc;
+
+       if (!udc)
+               return;
+
+       dev_vdbg(gadget->dev.parent, "unregistering gadget\n");
+
+       mutex_lock(&udc_lock);
+       list_del(&udc->list);
+
+       if (udc->driver) {
+               struct usb_gadget_driver *driver = udc->driver;
+
+               usb_gadget_remove_driver(udc);
+               list_add(&driver->pending, &gadget_driver_pending_list);
+       }
+       mutex_unlock(&udc_lock);
+
+       kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+       flush_work(&gadget->work);
+       device_unregister(&udc->dev);
+       device_unregister(&gadget->dev);
+}
+EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
+
+/* ------------------------------------------------------------------------- */
+
+static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
+{
+       int ret;
+
+       dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
+                       driver->function);
+
+       udc->driver = driver;
+       udc->dev.driver = &driver->driver;
+       udc->gadget->dev.driver = &driver->driver;
+
+       ret = driver->bind(udc->gadget, driver);
+       if (ret)
+               goto err1;
+       ret = usb_gadget_udc_start(udc);
+       if (ret) {
+               driver->unbind(udc->gadget);
+               goto err1;
+       }
+       usb_udc_connect_control(udc);
+
+       kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
+       return 0;
+err1:
+       if (ret != -EISNAM)
+               dev_err(&udc->dev, "failed to start %s: %d\n",
+                       udc->driver->function, ret);
+       udc->driver = NULL;
+       udc->dev.driver = NULL;
+       udc->gadget->dev.driver = NULL;
+       return ret;
+}
+
+int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
+{
+       struct usb_udc          *udc = NULL;
+       int                     ret = -ENODEV;
+
+       if (!driver || !driver->bind || !driver->setup)
+               return -EINVAL;
+
+       mutex_lock(&udc_lock);
+       if (driver->udc_name) {
+               list_for_each_entry(udc, &udc_list, list) {
+                       ret = strcmp(driver->udc_name, dev_name(&udc->dev));
+                       if (!ret)
+                               break;
+               }
+               if (!ret && !udc->driver)
+                       goto found;
+       } else {
+               list_for_each_entry(udc, &udc_list, list) {
+                       /* For now we take the first one */
+                       if (!udc->driver)
+                               goto found;
+               }
+       }
+
+       if (!driver->match_existing_only) {
+               list_add_tail(&driver->pending, &gadget_driver_pending_list);
+               pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
+                       driver->function);
+               ret = 0;
+       }
+
+       mutex_unlock(&udc_lock);
+       return ret;
+found:
+       ret = udc_bind_to_driver(udc, driver);
+       mutex_unlock(&udc_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);
+
+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
+{
+       struct usb_udc          *udc = NULL;
+       int                     ret = -ENODEV;
+
+       if (!driver || !driver->unbind)
+               return -EINVAL;
+
+       mutex_lock(&udc_lock);
+       list_for_each_entry(udc, &udc_list, list)
+               if (udc->driver == driver) {
+                       usb_gadget_remove_driver(udc);
+                       usb_gadget_set_state(udc->gadget,
+                                       USB_STATE_NOTATTACHED);
+                       ret = 0;
+                       break;
+               }
+
+       if (ret) {
+               list_del(&driver->pending);
+               ret = 0;
+       }
+       mutex_unlock(&udc_lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
+
+/* ------------------------------------------------------------------------- */
+
+static ssize_t usb_udc_srp_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t n)
+{
+       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
+
+       if (sysfs_streq(buf, "1"))
+               usb_gadget_wakeup(udc->gadget);
+
+       return n;
+}
+static DEVICE_ATTR(srp, S_IWUSR, NULL, usb_udc_srp_store);
+
+static ssize_t usb_udc_softconn_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t n)
+{
+       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
+
+       if (!udc->driver) {
+               dev_err(dev, "soft-connect without a gadget driver\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (sysfs_streq(buf, "connect")) {
+               usb_gadget_udc_start(udc);
+               usb_gadget_connect(udc->gadget);
+       } else if (sysfs_streq(buf, "disconnect")) {
+               usb_gadget_disconnect(udc->gadget);
+               udc->driver->disconnect(udc->gadget);
+               usb_gadget_udc_stop(udc);
+       } else {
+               dev_err(dev, "unsupported command '%s'\n", buf);
+               return -EINVAL;
+       }
+
+       return n;
+}
+static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
+       struct usb_gadget       *gadget = udc->gadget;
+
+       return sprintf(buf, "%s\n", usb_state_string(gadget->state));
+}
+static DEVICE_ATTR_RO(state);
+
+#define USB_UDC_SPEED_ATTR(name, param)                                        \
+ssize_t name##_show(struct device *dev,                                        \
+               struct device_attribute *attr, char *buf)               \
+{                                                                      \
+       struct usb_udc *udc = container_of(dev, struct usb_udc, dev);   \
+       return snprintf(buf, PAGE_SIZE, "%s\n",                         \
+                       usb_speed_string(udc->gadget->param));          \
+}                                                                      \
+static DEVICE_ATTR_RO(name)
+
+static USB_UDC_SPEED_ATTR(current_speed, speed);
+static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
+
+#define USB_UDC_ATTR(name)                                     \
+ssize_t name##_show(struct device *dev,                                \
+               struct device_attribute *attr, char *buf)       \
+{                                                              \
+       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev); \
+       struct usb_gadget       *gadget = udc->gadget;          \
+                                                               \
+       return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name);  \
+}                                                              \
+static DEVICE_ATTR_RO(name)
+
+static USB_UDC_ATTR(is_otg);
+static USB_UDC_ATTR(is_a_peripheral);
+static USB_UDC_ATTR(b_hnp_enable);
+static USB_UDC_ATTR(a_hnp_support);
+static USB_UDC_ATTR(a_alt_hnp_support);
+static USB_UDC_ATTR(is_selfpowered);
+
+static struct attribute *usb_udc_attrs[] = {
+       &dev_attr_srp.attr,
+       &dev_attr_soft_connect.attr,
+       &dev_attr_state.attr,
+       &dev_attr_current_speed.attr,
+       &dev_attr_maximum_speed.attr,
+
+       &dev_attr_is_otg.attr,
+       &dev_attr_is_a_peripheral.attr,
+       &dev_attr_b_hnp_enable.attr,
+       &dev_attr_a_hnp_support.attr,
+       &dev_attr_a_alt_hnp_support.attr,
+       &dev_attr_is_selfpowered.attr,
+       NULL,
+};
+
+static const struct attribute_group usb_udc_attr_group = {
+       .attrs = usb_udc_attrs,
+};
+
+static const struct attribute_group *usb_udc_attr_groups[] = {
+       &usb_udc_attr_group,
+       NULL,
+};
+
+static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
+       int                     ret;
+
+       ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
+       if (ret) {
+               dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
+               return ret;
+       }
+
+       if (udc->driver) {
+               ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
+                               udc->driver->function);
+               if (ret) {
+                       dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int __init usb_udc_init(void)
+{
+       udc_class = class_create(THIS_MODULE, "udc");
+       if (IS_ERR(udc_class)) {
+               pr_err("failed to create udc class --> %ld\n",
+                               PTR_ERR(udc_class));
+               return PTR_ERR(udc_class);
+       }
+
+       udc_class->dev_uevent = usb_udc_uevent;
+       return 0;
+}
+subsys_initcall(usb_udc_init);
+
+static void __exit usb_udc_exit(void)
+{
+       class_destroy(udc_class);
+}
+module_exit(usb_udc_exit);
+
+MODULE_DESCRIPTION("UDC Framework");
+MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
+MODULE_LICENSE("GPL v2");
index dde44450dfa93f1fab90da1bd47fd501aaf554e8..77d07904f932e7d65bf735f5dc6238465a309650 100644 (file)
@@ -647,12 +647,10 @@ static int dummy_disable(struct usb_ep *_ep)
 static struct usb_request *dummy_alloc_request(struct usb_ep *_ep,
                gfp_t mem_flags)
 {
-       struct dummy_ep         *ep;
        struct dummy_request    *req;
 
        if (!_ep)
                return NULL;
-       ep = usb_ep_to_dummy_ep(_ep);
 
        req = kzalloc(sizeof(*req), mem_flags);
        if (!req)
@@ -2444,9 +2442,6 @@ static int dummy_start(struct usb_hcd *hcd)
 
 static void dummy_stop(struct usb_hcd *hcd)
 {
-       struct dummy            *dum;
-
-       dum = hcd_to_dummy_hcd(hcd)->dum;
        device_remove_file(dummy_dev(hcd_to_dummy_hcd(hcd)), &dev_attr_urbs);
        dev_info(dummy_dev(hcd_to_dummy_hcd(hcd)), "stopped\n");
 }
index b1cfa96cc88f836b202f6d0ea8eb7dbcc5d09c36..6e977dc225709e7ee90e10b09abf2fad006278c1 100644 (file)
@@ -1199,8 +1199,6 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
        struct m66592 *m66592 = _m66592;
        u16 intsts0;
        u16 intenb0;
-       u16 brdysts, nrdysts, bempsts;
-       u16 brdyenb, nrdyenb, bempenb;
        u16 savepipe;
        u16 mask0;
 
@@ -1224,12 +1222,10 @@ static irqreturn_t m66592_irq(int irq, void *_m66592)
 
        mask0 = intsts0 & intenb0;
        if (mask0) {
-               brdysts = m66592_read(m66592, M66592_BRDYSTS);
-               nrdysts = m66592_read(m66592, M66592_NRDYSTS);
-               bempsts = m66592_read(m66592, M66592_BEMPSTS);
-               brdyenb = m66592_read(m66592, M66592_BRDYENB);
-               nrdyenb = m66592_read(m66592, M66592_NRDYENB);
-               bempenb = m66592_read(m66592, M66592_BEMPENB);
+               u16 brdysts = m66592_read(m66592, M66592_BRDYSTS);
+               u16 bempsts = m66592_read(m66592, M66592_BEMPSTS);
+               u16 brdyenb = m66592_read(m66592, M66592_BRDYENB);
+               u16 bempenb = m66592_read(m66592, M66592_BEMPENB);
 
                if (mask0 & M66592_VBINT) {
                        m66592_write(m66592,  0xffff & ~M66592_VBINT,
@@ -1408,28 +1404,20 @@ static int m66592_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 
 static int m66592_set_halt(struct usb_ep *_ep, int value)
 {
-       struct m66592_ep *ep;
-       struct m66592_request *req;
+       struct m66592_ep *ep = container_of(_ep, struct m66592_ep, ep);
        unsigned long flags;
        int ret = 0;
 
-       ep = container_of(_ep, struct m66592_ep, ep);
-       req = list_entry(ep->queue.next, struct m66592_request, queue);
-
        spin_lock_irqsave(&ep->m66592->lock, flags);
        if (!list_empty(&ep->queue)) {
                ret = -EAGAIN;
-               goto out;
-       }
-       if (value) {
+       } else if (value) {
                ep->busy = 1;
                pipe_stall(ep->m66592, ep->pipenum);
        } else {
                ep->busy = 0;
                pipe_stop(ep->m66592, ep->pipenum);
        }
-
-out:
        spin_unlock_irqrestore(&ep->m66592->lock, flags);
        return ret;
 }
index dafe74eb9adeca207549ed71910d77b22ccd03b2..b9e19a5913224be4fca62cbc03d93de1cb4c2701 100644 (file)
@@ -119,18 +119,14 @@ static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
        struct mv_u3d_req *curr_req)
 {
        struct mv_u3d_trb       *curr_trb;
-       dma_addr_t cur_deq_lo;
-       struct mv_u3d_ep_context        *curr_ep_context;
-       int trb_complete, actual, remaining_length = 0;
+       int actual, remaining_length = 0;
        int direction, ep_num;
        int retval = 0;
        u32 tmp, status, length;
 
-       curr_ep_context = &u3d->ep_context[index];
        direction = index % 2;
        ep_num = index / 2;
 
-       trb_complete = 0;
        actual = curr_req->req.length;
 
        while (!list_empty(&curr_req->trb_list)) {
@@ -143,15 +139,10 @@ static int mv_u3d_process_ep_req(struct mv_u3d *u3d, int index,
                }
 
                curr_trb->trb_hw->ctrl.own = 0;
-               if (direction == MV_U3D_EP_DIR_OUT) {
+               if (direction == MV_U3D_EP_DIR_OUT)
                        tmp = ioread32(&u3d->vuc_regs->rxst[ep_num].statuslo);
-                       cur_deq_lo =
-                               ioread32(&u3d->vuc_regs->rxst[ep_num].curdeqlo);
-               } else {
+               else
                        tmp = ioread32(&u3d->vuc_regs->txst[ep_num].statuslo);
-                       cur_deq_lo =
-                               ioread32(&u3d->vuc_regs->txst[ep_num].curdeqlo);
-               }
 
                status = tmp >> MV_U3D_XFERSTATUS_COMPLETE_SHIFT;
                length = tmp & MV_U3D_XFERSTATUS_TRB_LENGTH_MASK;
@@ -527,7 +518,6 @@ static int mv_u3d_ep_enable(struct usb_ep *_ep,
 {
        struct mv_u3d *u3d;
        struct mv_u3d_ep *ep;
-       struct mv_u3d_ep_context *ep_context;
        u16 max = 0;
        unsigned maxburst = 0;
        u32 epxcr, direction;
@@ -548,9 +538,6 @@ static int mv_u3d_ep_enable(struct usb_ep *_ep,
                _ep->maxburst = 1;
        maxburst = _ep->maxburst;
 
-       /* Get the endpoint context address */
-       ep_context = (struct mv_u3d_ep_context *)ep->ep_context;
-
        /* Set the max burst size */
        switch (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
        case USB_ENDPOINT_XFER_BULK:
@@ -633,7 +620,6 @@ static int  mv_u3d_ep_disable(struct usb_ep *_ep)
 {
        struct mv_u3d *u3d;
        struct mv_u3d_ep *ep;
-       struct mv_u3d_ep_context *ep_context;
        u32 epxcr, direction;
        unsigned long flags;
 
@@ -646,9 +632,6 @@ static int  mv_u3d_ep_disable(struct usb_ep *_ep)
 
        u3d = ep->u3d;
 
-       /* Get the endpoint context address */
-       ep_context = ep->ep_context;
-
        direction = mv_u3d_ep_dir(ep);
 
        /* nuke all pending requests (does flush) */
index 81b6229c780542e1a4e36888fbad329cb742b498..ce73b3552269fcd3793102d408685f69701ed22e 100644 (file)
@@ -129,7 +129,7 @@ static int process_ep_req(struct mv_udc *udc, int index,
 {
        struct mv_dtd   *curr_dtd;
        struct mv_dqh   *curr_dqh;
-       int td_complete, actual, remaining_length;
+       int actual, remaining_length;
        int i, direction;
        int retval = 0;
        u32 errors;
@@ -139,7 +139,6 @@ static int process_ep_req(struct mv_udc *udc, int index,
        direction = index % 2;
 
        curr_dtd = curr_req->head;
-       td_complete = 0;
        actual = curr_req->req.length;
 
        for (i = 0; i < curr_req->dtd_count; i++) {
@@ -412,11 +411,8 @@ static int req_to_dtd(struct mv_req *req)
        unsigned count;
        int is_last, is_first = 1;
        struct mv_dtd *dtd, *last_dtd = NULL;
-       struct mv_udc *udc;
        dma_addr_t dma;
 
-       udc = req->ep->udc;
-
        do {
                dtd = build_dtd(req, &count, &dma, &is_last);
                if (dtd == NULL)
@@ -567,7 +563,7 @@ static int  mv_ep_disable(struct usb_ep *_ep)
        struct mv_udc *udc;
        struct mv_ep *ep;
        struct mv_dqh *dqh;
-       u32 bit_pos, epctrlx, direction;
+       u32 epctrlx, direction;
        unsigned long flags;
 
        ep = container_of(_ep, struct mv_ep, ep);
@@ -582,7 +578,6 @@ static int  mv_ep_disable(struct usb_ep *_ep)
        spin_lock_irqsave(&udc->lock, flags);
 
        direction = ep_dir(ep);
-       bit_pos = 1 << ((direction == EP_DIR_OUT ? 0 : 16) + ep->ep_num);
 
        /* Reset the max packet length and the interrupt on Setup */
        dqh->max_packet_length = 0;
index 18f5ebd447b8239b88f73ab4cf6897a7f47ced7a..7c61134320939c4e95b936a07387b680db5c9d38 100644 (file)
@@ -329,12 +329,10 @@ static int net2272_disable(struct usb_ep *_ep)
 static struct usb_request *
 net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 {
-       struct net2272_ep *ep;
        struct net2272_request *req;
 
        if (!_ep)
                return NULL;
-       ep = container_of(_ep, struct net2272_ep, ep);
 
        req = kzalloc(sizeof(*req), gfp_flags);
        if (!req)
@@ -348,10 +346,8 @@ net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
 static void
 net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
 {
-       struct net2272_ep *ep;
        struct net2272_request *req;
 
-       ep = container_of(_ep, struct net2272_ep, ep);
        if (!_ep || !_req)
                return;
 
index c894b94b234bf58aba86cea8a341abb88941e61a..614ab951a4ae6e0fca159a17ae9fd9914aedd727 100644 (file)
@@ -211,7 +211,7 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
                goto print_err;
        }
 
-       if (dev->quirks & PLX_SUPERSPEED) {
+       if (dev->quirks & PLX_PCIE) {
                if ((desc->bEndpointAddress & 0x0f) >= 0x0c) {
                        ret = -EDOM;
                        goto print_err;
@@ -245,7 +245,7 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
        /* set type, direction, address; reset fifo counters */
        writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
 
-       if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) {
+       if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
                tmp = readl(&ep->cfg->ep_cfg);
                /* If USB ep number doesn't match hardware ep number */
                if ((tmp & 0xf) != usb_endpoint_num(desc)) {
@@ -316,7 +316,7 @@ net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
                        BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
        }
 
-       if (dev->quirks & PLX_SUPERSPEED)
+       if (dev->quirks & PLX_PCIE)
                ep_clear_seqnum(ep);
        writel(tmp, &ep->cfg->ep_cfg);
 
@@ -527,7 +527,7 @@ static int net2280_disable(struct usb_ep *_ep)
        spin_lock_irqsave(&ep->dev->lock, flags);
        nuke(ep);
 
-       if (ep->dev->quirks & PLX_SUPERSPEED)
+       if (ep->dev->quirks & PLX_PCIE)
                ep_reset_338x(ep->dev->regs, ep);
        else
                ep_reset_228x(ep->dev->regs, ep);
@@ -862,7 +862,7 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
        writel(readl(&dma->dmastat), &dma->dmastat);
 
        writel(td_dma, &dma->dmadesc);
-       if (ep->dev->quirks & PLX_SUPERSPEED)
+       if (ep->dev->quirks & PLX_PCIE)
                dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
        writel(dmactl, &dma->dmactl);
 
@@ -1046,7 +1046,7 @@ net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
 
        /* kickstart this i/o queue? */
        if  (list_empty(&ep->queue) && !ep->stopped &&
-               !((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
+               !((dev->quirks & PLX_PCIE) && ep->dma &&
                  (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
 
                /* use DMA if the endpoint supports it, else pio */
@@ -1169,7 +1169,7 @@ static void scan_dma_completions(struct net2280_ep *ep)
                        break;
                } else if (!ep->is_in &&
                           (req->req.length % ep->ep.maxpacket) &&
-                          !(ep->dev->quirks & PLX_SUPERSPEED)) {
+                          !(ep->dev->quirks & PLX_PCIE)) {
 
                        tmp = readl(&ep->regs->ep_stat);
                        /* AVOID TROUBLE HERE by not issuing short reads from
@@ -1367,7 +1367,7 @@ net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
                                ep->wedged = 1;
                } else {
                        clear_halt(ep);
-                       if (ep->dev->quirks & PLX_SUPERSPEED &&
+                       if (ep->dev->quirks & PLX_PCIE &&
                                !list_empty(&ep->queue) && ep->td_dma)
                                        restart_dma(ep);
                        ep->wedged = 0;
@@ -2394,7 +2394,7 @@ static int net2280_start(struct usb_gadget *_gadget,
         */
        net2280_led_active(dev, 1);
 
-       if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
+       if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
                defect7374_enable_data_eps_zero(dev);
 
        ep0_start(dev);
@@ -3063,7 +3063,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
                }
                ep->stopped = 0;
                dev->protocol_stall = 0;
-               if (!(dev->quirks & PLX_SUPERSPEED)) {
+               if (!(dev->quirks & PLX_PCIE)) {
                        if (ep->dev->quirks & PLX_2280)
                                tmp = BIT(FIFO_OVERFLOW) |
                                    BIT(FIFO_UNDERFLOW);
@@ -3090,7 +3090,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
                cpu_to_le32s(&u.raw[0]);
                cpu_to_le32s(&u.raw[1]);
 
-               if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
+               if ((dev->quirks & PLX_PCIE) && !dev->bug7734_patched)
                        defect7374_workaround(dev, u.r);
 
                tmp = 0;
@@ -3173,7 +3173,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
                        } else {
                                ep_vdbg(dev, "%s clear halt\n", e->ep.name);
                                clear_halt(e);
-                               if ((ep->dev->quirks & PLX_SUPERSPEED) &&
+                               if ((ep->dev->quirks & PLX_PCIE) &&
                                        !list_empty(&e->queue) && e->td_dma)
                                                restart_dma(e);
                        }
@@ -3195,7 +3195,7 @@ static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
                        if (e->ep.name == ep0name)
                                goto do_stall;
                        set_halt(e);
-                       if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
+                       if ((dev->quirks & PLX_PCIE) && e->dma)
                                abort_dma(e);
                        allow_status(ep);
                        ep_vdbg(dev, "%s set halt\n", ep->ep.name);
@@ -3234,7 +3234,7 @@ do_stall:
 #undef w_length
 
 next_endpoints:
-       if ((dev->quirks & PLX_SUPERSPEED) && dev->enhanced_mode) {
+       if ((dev->quirks & PLX_PCIE) && dev->enhanced_mode) {
                u32 mask = (BIT(ENDPOINT_0_INTERRUPT) |
                        USB3380_IRQSTAT0_EP_INTR_MASK_IN |
                        USB3380_IRQSTAT0_EP_INTR_MASK_OUT);
@@ -3399,7 +3399,7 @@ __acquires(dev->lock)
                writel(tmp, &dma->dmastat);
 
                /* dma sync*/
-               if (dev->quirks & PLX_SUPERSPEED) {
+               if (dev->quirks & PLX_PCIE) {
                        u32 r_dmacount = readl(&dma->dmacount);
                        if (!ep->is_in &&  (r_dmacount & 0x00FFFFFF) &&
                            (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
@@ -3468,7 +3468,7 @@ static irqreturn_t net2280_irq(int irq, void *_dev)
        /* control requests and PIO */
        handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
 
-       if (dev->quirks & PLX_SUPERSPEED) {
+       if (dev->quirks & PLX_PCIE) {
                /* re-enable interrupt to trigger any possible new interrupt */
                u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
                writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
@@ -3513,7 +3513,7 @@ static void net2280_remove(struct pci_dev *pdev)
        }
        if (dev->got_irq)
                free_irq(pdev->irq, dev);
-       if (dev->quirks & PLX_SUPERSPEED)
+       if (dev->quirks & PLX_PCIE)
                pci_disable_msi(pdev);
        if (dev->regs)
                iounmap(dev->regs);
@@ -3593,7 +3593,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
        dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
 
-       if (dev->quirks & PLX_SUPERSPEED) {
+       if (dev->quirks & PLX_PCIE) {
                u32 fsmvalue;
                u32 usbstat;
                dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
@@ -3637,7 +3637,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto done;
        }
 
-       if (dev->quirks & PLX_SUPERSPEED)
+       if (dev->quirks & PLX_PCIE)
                if (pci_enable_msi(pdev))
                        ep_err(dev, "Failed to enable MSI mode\n");
 
@@ -3755,10 +3755,19 @@ static const struct pci_device_id pci_ids[] = { {
        .class =        PCI_CLASS_SERIAL_USB_DEVICE,
        .class_mask =   ~0,
        .vendor =       PCI_VENDOR_ID_PLX,
+       .device =       0x2380,
+       .subvendor =    PCI_ANY_ID,
+       .subdevice =    PCI_ANY_ID,
+       .driver_data =  PLX_PCIE,
+        },
+       {
+       .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
+       .class_mask =   ~0,
+       .vendor =       PCI_VENDOR_ID_PLX,
        .device =       0x3380,
        .subvendor =    PCI_ANY_ID,
        .subdevice =    PCI_ANY_ID,
-       .driver_data =  PLX_SUPERSPEED,
+       .driver_data =  PLX_PCIE | PLX_SUPERSPEED,
         },
        {
        .class =        PCI_CLASS_SERIAL_USB_DEVICE,
@@ -3767,7 +3776,7 @@ static const struct pci_device_id pci_ids[] = { {
        .device =       0x3382,
        .subvendor =    PCI_ANY_ID,
        .subdevice =    PCI_ANY_ID,
-       .driver_data =  PLX_SUPERSPEED,
+       .driver_data =  PLX_PCIE | PLX_SUPERSPEED,
         },
 { /* end: all zeroes */ }
 };
index 0d32052bf16f90d5e1ace964cedd484c14a6f65d..2736a95751c3834fe5110f36201fa6363a00f33f 100644 (file)
@@ -47,6 +47,7 @@ set_idx_reg(struct net2280_regs __iomem *regs, u32 index, u32 value)
 #define PLX_LEGACY             BIT(0)
 #define PLX_2280               BIT(1)
 #define PLX_SUPERSPEED         BIT(2)
+#define PLX_PCIE               BIT(3)
 
 #define REG_DIAG               0x0
 #define     RETRY_COUNTER                                       16
index ebc51ec5790afaef0f261af9644f4e655c757f5e..a97da645c1b9eaecc5e5bb0bcb5ad25857c43ec8 100644 (file)
@@ -1477,11 +1477,11 @@ static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req,
                req->dma_mapped = 0;
        }
        ep->halted = 1;
-       spin_lock(&dev->lock);
+       spin_unlock(&dev->lock);
        if (!ep->in)
                pch_udc_ep_clear_rrdy(ep);
        usb_gadget_giveback_request(&ep->ep, &req->req);
-       spin_unlock(&dev->lock);
+       spin_lock(&dev->lock);
        ep->halted = halted;
 }
 
@@ -1984,9 +1984,8 @@ static int pch_udc_pcd_set_halt(struct usb_ep *usbep, int halt)
                        if (ep->num == PCH_UDC_EP0)
                                ep->dev->stall = 1;
                        pch_udc_ep_set_stall(ep);
-                       pch_udc_enable_ep_interrupts(ep->dev,
-                                                    PCH_UDC_EPINT(ep->in,
-                                                                  ep->num));
+                       pch_udc_enable_ep_interrupts(
+                               ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
                } else {
                        pch_udc_ep_clear_stall(ep);
                }
@@ -2451,16 +2450,11 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
  */
 static void pch_udc_postsvc_epinters(struct pch_udc_dev *dev, int ep_num)
 {
-       struct pch_udc_ep       *ep;
-       struct pch_udc_request *req;
-
-       ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
-       if (!list_empty(&ep->queue)) {
-               req = list_entry(ep->queue.next, struct pch_udc_request, queue);
-               pch_udc_enable_ep_interrupts(ep->dev,
-                                            PCH_UDC_EPINT(ep->in, ep->num));
-               pch_udc_ep_clear_nak(ep);
-       }
+       struct pch_udc_ep       *ep = &dev->ep[UDC_EPIN_IDX(ep_num)];
+       if (list_empty(&ep->queue))
+               return;
+       pch_udc_enable_ep_interrupts(ep->dev, PCH_UDC_EPINT(ep->in, ep->num));
+       pch_udc_ep_clear_nak(ep);
 }
 
 /**
@@ -2573,9 +2567,9 @@ static void pch_udc_svc_ur_interrupt(struct pch_udc_dev *dev)
                empty_req_queue(ep);
        }
        if (dev->driver) {
-               spin_lock(&dev->lock);
-               usb_gadget_udc_reset(&dev->gadget, dev->driver);
                spin_unlock(&dev->lock);
+               usb_gadget_udc_reset(&dev->gadget, dev->driver);
+               spin_lock(&dev->lock);
        }
 }
 
@@ -2654,9 +2648,9 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
                dev->ep[i].halted = 0;
        }
        dev->stall = 0;
-       spin_lock(&dev->lock);
-       dev->driver->setup(&dev->gadget, &dev->setup_data);
        spin_unlock(&dev->lock);
+       dev->driver->setup(&dev->gadget, &dev->setup_data);
+       spin_lock(&dev->lock);
 }
 
 /**
@@ -2691,9 +2685,9 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
        dev->stall = 0;
 
        /* call gadget zero with setup data received */
-       spin_lock(&dev->lock);
-       dev->driver->setup(&dev->gadget, &dev->setup_data);
        spin_unlock(&dev->lock);
+       dev->driver->setup(&dev->gadget, &dev->setup_data);
+       spin_lock(&dev->lock);
 }
 
 /**
index 001a3b74a993ea677dd65650a4457f5f14376a7c..ad140aa00132eb73dc9d490c5eb06e1b79190878 100644 (file)
@@ -1825,13 +1825,10 @@ fail:
  * Disables all udc endpoints (even control endpoint), report disconnect to
  * the gadget user.
  */
-static void stop_activity(struct pxa_udc *udc, struct usb_gadget_driver *driver)
+static void stop_activity(struct pxa_udc *udc)
 {
        int i;
 
-       /* don't disconnect drivers more than once */
-       if (udc->gadget.speed == USB_SPEED_UNKNOWN)
-               driver = NULL;
        udc->gadget.speed = USB_SPEED_UNKNOWN;
 
        for (i = 0; i < NR_USB_ENDPOINTS; i++)
@@ -1848,7 +1845,7 @@ static int pxa27x_udc_stop(struct usb_gadget *g)
 {
        struct pxa_udc *udc = to_pxa(g);
 
-       stop_activity(udc, NULL);
+       stop_activity(udc);
        udc_disable(udc);
 
        udc->driver = NULL;
@@ -2296,7 +2293,7 @@ static void irq_udc_reset(struct pxa_udc *udc)
 
        if ((udccr & UDCCR_UDA) == 0) {
                dev_dbg(udc->dev, "USB reset start\n");
-               stop_activity(udc, udc->driver);
+               stop_activity(udc);
        }
        udc->gadget.speed = USB_SPEED_FULL;
        memset(&udc->stats, 0, sizeof udc->stats);
index 8b300e6da7fcc4828184efe6184c4ca4fc90226d..f2c8862093a26b7b362905dc72034be0914b321d 100644 (file)
@@ -1464,8 +1464,6 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
        struct r8a66597 *r8a66597 = _r8a66597;
        u16 intsts0;
        u16 intenb0;
-       u16 brdysts, nrdysts, bempsts;
-       u16 brdyenb, nrdyenb, bempenb;
        u16 savepipe;
        u16 mask0;
 
@@ -1481,12 +1479,10 @@ static irqreturn_t r8a66597_irq(int irq, void *_r8a66597)
 
        mask0 = intsts0 & intenb0;
        if (mask0) {
-               brdysts = r8a66597_read(r8a66597, BRDYSTS);
-               nrdysts = r8a66597_read(r8a66597, NRDYSTS);
-               bempsts = r8a66597_read(r8a66597, BEMPSTS);
-               brdyenb = r8a66597_read(r8a66597, BRDYENB);
-               nrdyenb = r8a66597_read(r8a66597, NRDYENB);
-               bempenb = r8a66597_read(r8a66597, BEMPENB);
+               u16 brdysts = r8a66597_read(r8a66597, BRDYSTS);
+               u16 bempsts = r8a66597_read(r8a66597, BEMPSTS);
+               u16 brdyenb = r8a66597_read(r8a66597, BRDYENB);
+               u16 bempenb = r8a66597_read(r8a66597, BEMPENB);
 
                if (mask0 & VBINT) {
                        r8a66597_write(r8a66597,  0xffff & ~VBINT,
@@ -1658,20 +1654,14 @@ static int r8a66597_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 
 static int r8a66597_set_halt(struct usb_ep *_ep, int value)
 {
-       struct r8a66597_ep *ep;
-       struct r8a66597_request *req;
+       struct r8a66597_ep *ep = container_of(_ep, struct r8a66597_ep, ep);
        unsigned long flags;
        int ret = 0;
 
-       ep = container_of(_ep, struct r8a66597_ep, ep);
-       req = get_request_from_ep(ep);
-
        spin_lock_irqsave(&ep->r8a66597->lock, flags);
        if (!list_empty(&ep->queue)) {
                ret = -EAGAIN;
-               goto out;
-       }
-       if (value) {
+       } else if (value) {
                ep->busy = 1;
                pipe_stall(ep->r8a66597, ep->pipenum);
        } else {
@@ -1679,8 +1669,6 @@ static int r8a66597_set_halt(struct usb_ep *_ep, int value)
                ep->wedge = 0;
                pipe_stop(ep->r8a66597, ep->pipenum);
        }
-
-out:
        spin_unlock_irqrestore(&ep->r8a66597->lock, flags);
        return ret;
 }
diff --git a/drivers/usb/gadget/udc/trace.c b/drivers/usb/gadget/udc/trace.c
new file mode 100644 (file)
index 0000000..8c551ab
--- /dev/null
@@ -0,0 +1,18 @@
+/**
+ * trace.c - USB Gadget Framework Trace Support
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Felipe Balbi <felipe.balbi@linux.intel.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2  of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/usb/gadget/udc/trace.h b/drivers/usb/gadget/udc/trace.h
new file mode 100644 (file)
index 0000000..da29874
--- /dev/null
@@ -0,0 +1,298 @@
+/**
+ * udc.c - Core UDC Framework
+ *
+ * Copyright (C) 2016 Intel Corporation
+ * Author: Felipe Balbi <felipe.balbi@linux.intel.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2  of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gadget
+
+#if !defined(__UDC_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __UDC_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <asm/byteorder.h>
+#include <linux/usb/gadget.h>
+
+DECLARE_EVENT_CLASS(udc_log_gadget,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret),
+       TP_STRUCT__entry(
+               __field(enum usb_device_speed, speed)
+               __field(enum usb_device_speed, max_speed)
+               __field(enum usb_device_state, state)
+               __field(unsigned, mA)
+               __field(unsigned, sg_supported)
+               __field(unsigned, is_otg)
+               __field(unsigned, is_a_peripheral)
+               __field(unsigned, b_hnp_enable)
+               __field(unsigned, a_hnp_support)
+               __field(unsigned, hnp_polling_support)
+               __field(unsigned, host_request_flag)
+               __field(unsigned, quirk_ep_out_aligned_size)
+               __field(unsigned, quirk_altset_not_supp)
+               __field(unsigned, quirk_stall_not_supp)
+               __field(unsigned, quirk_zlp_not_supp)
+               __field(unsigned, is_selfpowered)
+               __field(unsigned, deactivated)
+               __field(unsigned, connected)
+               __field(int, ret)
+       ),
+       TP_fast_assign(
+               __entry->speed = g->speed;
+               __entry->max_speed = g->max_speed;
+               __entry->state = g->state;
+               __entry->mA = g->mA;
+               __entry->sg_supported = g->sg_supported;
+               __entry->is_otg = g->is_otg;
+               __entry->is_a_peripheral = g->is_a_peripheral;
+               __entry->b_hnp_enable = g->b_hnp_enable;
+               __entry->a_hnp_support = g->a_hnp_support;
+               __entry->hnp_polling_support = g->hnp_polling_support;
+               __entry->host_request_flag = g->host_request_flag;
+               __entry->quirk_ep_out_aligned_size = g->quirk_ep_out_aligned_size;
+               __entry->quirk_altset_not_supp = g->quirk_altset_not_supp;
+               __entry->quirk_stall_not_supp = g->quirk_stall_not_supp;
+               __entry->quirk_zlp_not_supp = g->quirk_zlp_not_supp;
+               __entry->is_selfpowered = g->is_selfpowered;
+               __entry->deactivated = g->deactivated;
+               __entry->connected = g->connected;
+               __entry->ret = ret;
+       ),
+       TP_printk("speed %d/%d state %d %dmA [%s%s%s%s%s%s%s%s%s%s%s%s%s%s] --> %d",
+               __entry->speed, __entry->max_speed, __entry->state, __entry->mA,
+               __entry->sg_supported ? "sg:" : "",
+               __entry->is_otg ? "OTG:" : "",
+               __entry->is_a_peripheral ? "a_peripheral:" : "",
+               __entry->b_hnp_enable ? "b_hnp:" : "",
+               __entry->a_hnp_support ? "a_hnp:" : "",
+               __entry->hnp_polling_support ? "hnp_poll:" : "",
+               __entry->host_request_flag ? "hostreq:" : "",
+               __entry->quirk_ep_out_aligned_size ? "out_aligned:" : "",
+               __entry->quirk_altset_not_supp ? "no_altset:" : "",
+               __entry->quirk_stall_not_supp ? "no_stall:" : "",
+               __entry->quirk_zlp_not_supp ? "no_zlp" : "",
+               __entry->is_selfpowered ? "self-powered:" : "bus-powered:",
+               __entry->deactivated ? "deactivated:" : "activated:",
+               __entry->connected ? "connected" : "disconnected",
+               __entry->ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_frame_number,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_wakeup,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_set_selfpowered,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_clear_selfpowered,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_connect,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_draw,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_vbus_disconnect,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_connect,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_disconnect,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_deactivate,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DEFINE_EVENT(udc_log_gadget, usb_gadget_activate,
+       TP_PROTO(struct usb_gadget *g, int ret),
+       TP_ARGS(g, ret)
+);
+
+DECLARE_EVENT_CLASS(udc_log_ep,
+       TP_PROTO(struct usb_ep *ep, int ret),
+       TP_ARGS(ep, ret),
+       TP_STRUCT__entry(
+               __dynamic_array(char, name, UDC_TRACE_STR_MAX)
+               __field(unsigned, maxpacket)
+               __field(unsigned, maxpacket_limit)
+               __field(unsigned, max_streams)
+               __field(unsigned, mult)
+               __field(unsigned, maxburst)
+               __field(u8, address)
+               __field(bool, claimed)
+               __field(bool, enabled)
+               __field(int, ret)
+       ),
+       TP_fast_assign(
+               snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name);
+               __entry->maxpacket = ep->maxpacket;
+               __entry->maxpacket_limit = ep->maxpacket_limit;
+               __entry->max_streams = ep->max_streams;
+               __entry->mult = ep->mult;
+               __entry->maxburst = ep->maxburst;
+               __entry->address = ep->address,
+               __entry->claimed = ep->claimed;
+               __entry->enabled = ep->enabled;
+               __entry->ret = ret;
+       ),
+       TP_printk("%s: mps %d/%d streams %d mult %d burst %d addr %02x %s%s --> %d",
+               __get_str(name), __entry->maxpacket, __entry->maxpacket_limit,
+               __entry->max_streams, __entry->mult, __entry->maxburst,
+               __entry->address, __entry->claimed ? "claimed:" : "released:",
+               __entry->enabled ? "enabled" : "disabled", ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_maxpacket_limit,
+       TP_PROTO(struct usb_ep *ep, int ret),
+       TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_enable,
+       TP_PROTO(struct usb_ep *ep, int ret),
+       TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_disable,
+       TP_PROTO(struct usb_ep *ep, int ret),
+       TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_halt,
+       TP_PROTO(struct usb_ep *ep, int ret),
+       TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_clear_halt,
+       TP_PROTO(struct usb_ep *ep, int ret),
+       TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_set_wedge,
+       TP_PROTO(struct usb_ep *ep, int ret),
+       TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_fifo_status,
+       TP_PROTO(struct usb_ep *ep, int ret),
+       TP_ARGS(ep, ret)
+);
+
+DEFINE_EVENT(udc_log_ep, usb_ep_fifo_flush,
+       TP_PROTO(struct usb_ep *ep, int ret),
+       TP_ARGS(ep, ret)
+);
+
+DECLARE_EVENT_CLASS(udc_log_req,
+       TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+       TP_ARGS(ep, req, ret),
+       TP_STRUCT__entry(
+               __dynamic_array(char, name, UDC_TRACE_STR_MAX)
+               __field(unsigned, length)
+               __field(unsigned, actual)
+               __field(unsigned, num_sgs)
+               __field(unsigned, num_mapped_sgs)
+               __field(unsigned, stream_id)
+               __field(unsigned, no_interrupt)
+               __field(unsigned, zero)
+               __field(unsigned, short_not_ok)
+               __field(int, status)
+               __field(int, ret)
+       ),
+       TP_fast_assign(
+               snprintf(__get_str(name), UDC_TRACE_STR_MAX, "%s", ep->name);
+               __entry->length = req->length;
+               __entry->actual = req->actual;
+               __entry->num_sgs = req->num_sgs;
+               __entry->num_mapped_sgs = req->num_mapped_sgs;
+               __entry->stream_id = req->stream_id;
+               __entry->no_interrupt = req->no_interrupt;
+               __entry->zero = req->zero;
+               __entry->short_not_ok = req->short_not_ok;
+               __entry->status = req->status;
+               __entry->ret = ret;
+       ),
+       TP_printk("%s: length %d/%d sgs %d/%d stream %d %s%s%s status %d --> %d",
+               __get_str(name), __entry->actual, __entry->length,
+               __entry->num_mapped_sgs, __entry->num_sgs, __entry->stream_id,
+               __entry->zero ? "Z" : "z",
+               __entry->short_not_ok ? "S" : "s",
+               __entry->no_interrupt ? "i" : "I",
+               __entry->status, __entry->ret
+       )
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_alloc_request,
+       TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+       TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_free_request,
+       TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+       TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_queue,
+       TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+       TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_ep_dequeue,
+       TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+       TP_ARGS(ep, req, ret)
+);
+
+DEFINE_EVENT(udc_log_req, usb_gadget_giveback_request,
+       TP_PROTO(struct usb_ep *ep, struct usb_request *req, int ret),
+       TP_ARGS(ep, req, ret)
+);
+
+#endif /* __UDC_TRACE_H */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
deleted file mode 100644 (file)
index e1b2dce..0000000
+++ /dev/null
@@ -1,800 +0,0 @@
-/**
- * udc.c - Core UDC Framework
- *
- * Copyright (C) 2010 Texas Instruments
- * Author: Felipe Balbi <balbi@ti.com>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2  of
- * the License as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/err.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb.h>
-
-/**
- * struct usb_udc - describes one usb device controller
- * @driver - the gadget driver pointer. For use by the class code
- * @dev - the child device to the actual controller
- * @gadget - the gadget. For use by the class code
- * @list - for use by the udc class driver
- * @vbus - for udcs who care about vbus status, this value is real vbus status;
- * for udcs who do not care about vbus status, this value is always true
- *
- * This represents the internal data structure which is used by the UDC-class
- * to hold information about udc driver and gadget together.
- */
-struct usb_udc {
-       struct usb_gadget_driver        *driver;
-       struct usb_gadget               *gadget;
-       struct device                   dev;
-       struct list_head                list;
-       bool                            vbus;
-};
-
-static struct class *udc_class;
-static LIST_HEAD(udc_list);
-static LIST_HEAD(gadget_driver_pending_list);
-static DEFINE_MUTEX(udc_lock);
-
-static int udc_bind_to_driver(struct usb_udc *udc,
-               struct usb_gadget_driver *driver);
-
-/* ------------------------------------------------------------------------- */
-
-#ifdef CONFIG_HAS_DMA
-
-int usb_gadget_map_request_by_dev(struct device *dev,
-               struct usb_request *req, int is_in)
-{
-       if (req->length == 0)
-               return 0;
-
-       if (req->num_sgs) {
-               int     mapped;
-
-               mapped = dma_map_sg(dev, req->sg, req->num_sgs,
-                               is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-               if (mapped == 0) {
-                       dev_err(dev, "failed to map SGs\n");
-                       return -EFAULT;
-               }
-
-               req->num_mapped_sgs = mapped;
-       } else {
-               req->dma = dma_map_single(dev, req->buf, req->length,
-                               is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
-               if (dma_mapping_error(dev, req->dma)) {
-                       dev_err(dev, "failed to map buffer\n");
-                       return -EFAULT;
-               }
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(usb_gadget_map_request_by_dev);
-
-int usb_gadget_map_request(struct usb_gadget *gadget,
-               struct usb_request *req, int is_in)
-{
-       return usb_gadget_map_request_by_dev(gadget->dev.parent, req, is_in);
-}
-EXPORT_SYMBOL_GPL(usb_gadget_map_request);
-
-void usb_gadget_unmap_request_by_dev(struct device *dev,
-               struct usb_request *req, int is_in)
-{
-       if (req->length == 0)
-               return;
-
-       if (req->num_mapped_sgs) {
-               dma_unmap_sg(dev, req->sg, req->num_mapped_sgs,
-                               is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-
-               req->num_mapped_sgs = 0;
-       } else {
-               dma_unmap_single(dev, req->dma, req->length,
-                               is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-       }
-}
-EXPORT_SYMBOL_GPL(usb_gadget_unmap_request_by_dev);
-
-void usb_gadget_unmap_request(struct usb_gadget *gadget,
-               struct usb_request *req, int is_in)
-{
-       usb_gadget_unmap_request_by_dev(gadget->dev.parent, req, is_in);
-}
-EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
-
-#endif /* CONFIG_HAS_DMA */
-
-/* ------------------------------------------------------------------------- */
-
-/**
- * usb_gadget_giveback_request - give the request back to the gadget layer
- * Context: in_interrupt()
- *
- * This is called by device controller drivers in order to return the
- * completed request back to the gadget layer.
- */
-void usb_gadget_giveback_request(struct usb_ep *ep,
-               struct usb_request *req)
-{
-       if (likely(req->status == 0))
-               usb_led_activity(USB_LED_EVENT_GADGET);
-
-       req->complete(ep, req);
-}
-EXPORT_SYMBOL_GPL(usb_gadget_giveback_request);
-
-/* ------------------------------------------------------------------------- */
-
-/**
- * gadget_find_ep_by_name - returns ep whose name is the same as sting passed
- *     in second parameter or NULL if searched endpoint not found
- * @g: controller to check for quirk
- * @name: name of searched endpoint
- */
-struct usb_ep *gadget_find_ep_by_name(struct usb_gadget *g, const char *name)
-{
-       struct usb_ep *ep;
-
-       gadget_for_each_ep(ep, g) {
-               if (!strcmp(ep->name, name))
-                       return ep;
-       }
-
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(gadget_find_ep_by_name);
-
-/* ------------------------------------------------------------------------- */
-
-int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
-               struct usb_ep *ep, struct usb_endpoint_descriptor *desc,
-               struct usb_ss_ep_comp_descriptor *ep_comp)
-{
-       u8              type;
-       u16             max;
-       int             num_req_streams = 0;
-
-       /* endpoint already claimed? */
-       if (ep->claimed)
-               return 0;
-
-       type = usb_endpoint_type(desc);
-       max = 0x7ff & usb_endpoint_maxp(desc);
-
-       if (usb_endpoint_dir_in(desc) && !ep->caps.dir_in)
-               return 0;
-       if (usb_endpoint_dir_out(desc) && !ep->caps.dir_out)
-               return 0;
-
-       if (max > ep->maxpacket_limit)
-               return 0;
-
-       /* "high bandwidth" works only at high speed */
-       if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
-               return 0;
-
-       switch (type) {
-       case USB_ENDPOINT_XFER_CONTROL:
-               /* only support ep0 for portable CONTROL traffic */
-               return 0;
-       case USB_ENDPOINT_XFER_ISOC:
-               if (!ep->caps.type_iso)
-                       return 0;
-               /* ISO:  limit 1023 bytes full speed, 1024 high/super speed */
-               if (!gadget_is_dualspeed(gadget) && max > 1023)
-                       return 0;
-               break;
-       case USB_ENDPOINT_XFER_BULK:
-               if (!ep->caps.type_bulk)
-                       return 0;
-               if (ep_comp && gadget_is_superspeed(gadget)) {
-                       /* Get the number of required streams from the
-                        * EP companion descriptor and see if the EP
-                        * matches it
-                        */
-                       num_req_streams = ep_comp->bmAttributes & 0x1f;
-                       if (num_req_streams > ep->max_streams)
-                               return 0;
-               }
-               break;
-       case USB_ENDPOINT_XFER_INT:
-               /* Bulk endpoints handle interrupt transfers,
-                * except the toggle-quirky iso-synch kind
-                */
-               if (!ep->caps.type_int && !ep->caps.type_bulk)
-                       return 0;
-               /* INT:  limit 64 bytes full speed, 1024 high/super speed */
-               if (!gadget_is_dualspeed(gadget) && max > 64)
-                       return 0;
-               break;
-       }
-
-       return 1;
-}
-EXPORT_SYMBOL_GPL(usb_gadget_ep_match_desc);
-
-/* ------------------------------------------------------------------------- */
-
-static void usb_gadget_state_work(struct work_struct *work)
-{
-       struct usb_gadget *gadget = work_to_gadget(work);
-       struct usb_udc *udc = gadget->udc;
-
-       if (udc)
-               sysfs_notify(&udc->dev.kobj, NULL, "state");
-}
-
-void usb_gadget_set_state(struct usb_gadget *gadget,
-               enum usb_device_state state)
-{
-       gadget->state = state;
-       schedule_work(&gadget->work);
-}
-EXPORT_SYMBOL_GPL(usb_gadget_set_state);
-
-/* ------------------------------------------------------------------------- */
-
-static void usb_udc_connect_control(struct usb_udc *udc)
-{
-       if (udc->vbus)
-               usb_gadget_connect(udc->gadget);
-       else
-               usb_gadget_disconnect(udc->gadget);
-}
-
-/**
- * usb_udc_vbus_handler - updates the udc core vbus status, and try to
- * connect or disconnect gadget
- * @gadget: The gadget which vbus change occurs
- * @status: The vbus status
- *
- * The udc driver calls it when it wants to connect or disconnect gadget
- * according to vbus status.
- */
-void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
-{
-       struct usb_udc *udc = gadget->udc;
-
-       if (udc) {
-               udc->vbus = status;
-               usb_udc_connect_control(udc);
-       }
-}
-EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
-
-/**
- * usb_gadget_udc_reset - notifies the udc core that bus reset occurs
- * @gadget: The gadget which bus reset occurs
- * @driver: The gadget driver we want to notify
- *
- * If the udc driver has bus reset handler, it needs to call this when the bus
- * reset occurs, it notifies the gadget driver that the bus reset occurs as
- * well as updates gadget state.
- */
-void usb_gadget_udc_reset(struct usb_gadget *gadget,
-               struct usb_gadget_driver *driver)
-{
-       driver->reset(gadget);
-       usb_gadget_set_state(gadget, USB_STATE_DEFAULT);
-}
-EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
-
-/**
- * usb_gadget_udc_start - tells usb device controller to start up
- * @udc: The UDC to be started
- *
- * This call is issued by the UDC Class driver when it's about
- * to register a gadget driver to the device controller, before
- * calling gadget driver's bind() method.
- *
- * It allows the controller to be powered off until strictly
- * necessary to have it powered on.
- *
- * Returns zero on success, else negative errno.
- */
-static inline int usb_gadget_udc_start(struct usb_udc *udc)
-{
-       return udc->gadget->ops->udc_start(udc->gadget, udc->driver);
-}
-
-/**
- * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
- * @gadget: The device we want to stop activity
- * @driver: The driver to unbind from @gadget
- *
- * This call is issued by the UDC Class driver after calling
- * gadget driver's unbind() method.
- *
- * The details are implementation specific, but it can go as
- * far as powering off UDC completely and disable its data
- * line pullups.
- */
-static inline void usb_gadget_udc_stop(struct usb_udc *udc)
-{
-       udc->gadget->ops->udc_stop(udc->gadget);
-}
-
-/**
- * usb_udc_release - release the usb_udc struct
- * @dev: the dev member within usb_udc
- *
- * This is called by driver's core in order to free memory once the last
- * reference is released.
- */
-static void usb_udc_release(struct device *dev)
-{
-       struct usb_udc *udc;
-
-       udc = container_of(dev, struct usb_udc, dev);
-       dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
-       kfree(udc);
-}
-
-static const struct attribute_group *usb_udc_attr_groups[];
-
-static void usb_udc_nop_release(struct device *dev)
-{
-       dev_vdbg(dev, "%s\n", __func__);
-}
-
-/**
- * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list
- * @parent: the parent device to this udc. Usually the controller driver's
- * device.
- * @gadget: the gadget to be added to the list.
- * @release: a gadget release function.
- *
- * Returns zero on success, negative errno otherwise.
- */
-int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
-               void (*release)(struct device *dev))
-{
-       struct usb_udc          *udc;
-       struct usb_gadget_driver *driver;
-       int                     ret = -ENOMEM;
-
-       udc = kzalloc(sizeof(*udc), GFP_KERNEL);
-       if (!udc)
-               goto err1;
-
-       dev_set_name(&gadget->dev, "gadget");
-       INIT_WORK(&gadget->work, usb_gadget_state_work);
-       gadget->dev.parent = parent;
-
-       if (release)
-               gadget->dev.release = release;
-       else
-               gadget->dev.release = usb_udc_nop_release;
-
-       ret = device_register(&gadget->dev);
-       if (ret)
-               goto err2;
-
-       device_initialize(&udc->dev);
-       udc->dev.release = usb_udc_release;
-       udc->dev.class = udc_class;
-       udc->dev.groups = usb_udc_attr_groups;
-       udc->dev.parent = parent;
-       ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj));
-       if (ret)
-               goto err3;
-
-       udc->gadget = gadget;
-       gadget->udc = udc;
-
-       mutex_lock(&udc_lock);
-       list_add_tail(&udc->list, &udc_list);
-
-       ret = device_add(&udc->dev);
-       if (ret)
-               goto err4;
-
-       usb_gadget_set_state(gadget, USB_STATE_NOTATTACHED);
-       udc->vbus = true;
-
-       /* pick up one of pending gadget drivers */
-       list_for_each_entry(driver, &gadget_driver_pending_list, pending) {
-               if (!driver->udc_name || strcmp(driver->udc_name,
-                                               dev_name(&udc->dev)) == 0) {
-                       ret = udc_bind_to_driver(udc, driver);
-                       if (ret != -EPROBE_DEFER)
-                               list_del(&driver->pending);
-                       if (ret)
-                               goto err4;
-                       break;
-               }
-       }
-
-       mutex_unlock(&udc_lock);
-
-       return 0;
-
-err4:
-       list_del(&udc->list);
-       mutex_unlock(&udc_lock);
-
-err3:
-       put_device(&udc->dev);
-       device_del(&gadget->dev);
-
-err2:
-       put_device(&gadget->dev);
-       kfree(udc);
-
-err1:
-       return ret;
-}
-EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release);
-
-/**
- * usb_get_gadget_udc_name - get the name of the first UDC controller
- * This functions returns the name of the first UDC controller in the system.
- * Please note that this interface is usefull only for legacy drivers which
- * assume that there is only one UDC controller in the system and they need to
- * get its name before initialization. There is no guarantee that the UDC
- * of the returned name will be still available, when gadget driver registers
- * itself.
- *
- * Returns pointer to string with UDC controller name on success, NULL
- * otherwise. Caller should kfree() returned string.
- */
-char *usb_get_gadget_udc_name(void)
-{
-       struct usb_udc *udc;
-       char *name = NULL;
-
-       /* For now we take the first available UDC */
-       mutex_lock(&udc_lock);
-       list_for_each_entry(udc, &udc_list, list) {
-               if (!udc->driver) {
-                       name = kstrdup(udc->gadget->name, GFP_KERNEL);
-                       break;
-               }
-       }
-       mutex_unlock(&udc_lock);
-       return name;
-}
-EXPORT_SYMBOL_GPL(usb_get_gadget_udc_name);
-
-/**
- * usb_add_gadget_udc - adds a new gadget to the udc class driver list
- * @parent: the parent device to this udc. Usually the controller
- * driver's device.
- * @gadget: the gadget to be added to the list
- *
- * Returns zero on success, negative errno otherwise.
- */
-int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget)
-{
-       return usb_add_gadget_udc_release(parent, gadget, NULL);
-}
-EXPORT_SYMBOL_GPL(usb_add_gadget_udc);
-
-static void usb_gadget_remove_driver(struct usb_udc *udc)
-{
-       dev_dbg(&udc->dev, "unregistering UDC driver [%s]\n",
-                       udc->driver->function);
-
-       kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
-
-       usb_gadget_disconnect(udc->gadget);
-       udc->driver->disconnect(udc->gadget);
-       udc->driver->unbind(udc->gadget);
-       usb_gadget_udc_stop(udc);
-
-       udc->driver = NULL;
-       udc->dev.driver = NULL;
-       udc->gadget->dev.driver = NULL;
-}
-
-/**
- * usb_del_gadget_udc - deletes @udc from udc_list
- * @gadget: the gadget to be removed.
- *
- * This, will call usb_gadget_unregister_driver() if
- * the @udc is still busy.
- */
-void usb_del_gadget_udc(struct usb_gadget *gadget)
-{
-       struct usb_udc *udc = gadget->udc;
-
-       if (!udc)
-               return;
-
-       dev_vdbg(gadget->dev.parent, "unregistering gadget\n");
-
-       mutex_lock(&udc_lock);
-       list_del(&udc->list);
-
-       if (udc->driver) {
-               struct usb_gadget_driver *driver = udc->driver;
-
-               usb_gadget_remove_driver(udc);
-               list_add(&driver->pending, &gadget_driver_pending_list);
-       }
-       mutex_unlock(&udc_lock);
-
-       kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
-       flush_work(&gadget->work);
-       device_unregister(&udc->dev);
-       device_unregister(&gadget->dev);
-}
-EXPORT_SYMBOL_GPL(usb_del_gadget_udc);
-
-/* ------------------------------------------------------------------------- */
-
-static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *driver)
-{
-       int ret;
-
-       dev_dbg(&udc->dev, "registering UDC driver [%s]\n",
-                       driver->function);
-
-       udc->driver = driver;
-       udc->dev.driver = &driver->driver;
-       udc->gadget->dev.driver = &driver->driver;
-
-       ret = driver->bind(udc->gadget, driver);
-       if (ret)
-               goto err1;
-       ret = usb_gadget_udc_start(udc);
-       if (ret) {
-               driver->unbind(udc->gadget);
-               goto err1;
-       }
-       usb_udc_connect_control(udc);
-
-       kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
-       return 0;
-err1:
-       if (ret != -EISNAM)
-               dev_err(&udc->dev, "failed to start %s: %d\n",
-                       udc->driver->function, ret);
-       udc->driver = NULL;
-       udc->dev.driver = NULL;
-       udc->gadget->dev.driver = NULL;
-       return ret;
-}
-
-int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
-{
-       struct usb_udc          *udc = NULL;
-       int                     ret = -ENODEV;
-
-       if (!driver || !driver->bind || !driver->setup)
-               return -EINVAL;
-
-       mutex_lock(&udc_lock);
-       if (driver->udc_name) {
-               list_for_each_entry(udc, &udc_list, list) {
-                       ret = strcmp(driver->udc_name, dev_name(&udc->dev));
-                       if (!ret)
-                               break;
-               }
-               if (!ret && !udc->driver)
-                       goto found;
-       } else {
-               list_for_each_entry(udc, &udc_list, list) {
-                       /* For now we take the first one */
-                       if (!udc->driver)
-                               goto found;
-               }
-       }
-
-       if (!driver->match_existing_only) {
-               list_add_tail(&driver->pending, &gadget_driver_pending_list);
-               pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
-                       driver->function);
-               ret = 0;
-       }
-
-       mutex_unlock(&udc_lock);
-       return ret;
-found:
-       ret = udc_bind_to_driver(udc, driver);
-       mutex_unlock(&udc_lock);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(usb_gadget_probe_driver);
-
-int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
-{
-       struct usb_udc          *udc = NULL;
-       int                     ret = -ENODEV;
-
-       if (!driver || !driver->unbind)
-               return -EINVAL;
-
-       mutex_lock(&udc_lock);
-       list_for_each_entry(udc, &udc_list, list)
-               if (udc->driver == driver) {
-                       usb_gadget_remove_driver(udc);
-                       usb_gadget_set_state(udc->gadget,
-                                       USB_STATE_NOTATTACHED);
-                       ret = 0;
-                       break;
-               }
-
-       if (ret) {
-               list_del(&driver->pending);
-               ret = 0;
-       }
-       mutex_unlock(&udc_lock);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(usb_gadget_unregister_driver);
-
-/* ------------------------------------------------------------------------- */
-
-static ssize_t usb_udc_srp_store(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t n)
-{
-       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
-
-       if (sysfs_streq(buf, "1"))
-               usb_gadget_wakeup(udc->gadget);
-
-       return n;
-}
-static DEVICE_ATTR(srp, S_IWUSR, NULL, usb_udc_srp_store);
-
-static ssize_t usb_udc_softconn_store(struct device *dev,
-               struct device_attribute *attr, const char *buf, size_t n)
-{
-       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
-
-       if (!udc->driver) {
-               dev_err(dev, "soft-connect without a gadget driver\n");
-               return -EOPNOTSUPP;
-       }
-
-       if (sysfs_streq(buf, "connect")) {
-               usb_gadget_udc_start(udc);
-               usb_gadget_connect(udc->gadget);
-       } else if (sysfs_streq(buf, "disconnect")) {
-               usb_gadget_disconnect(udc->gadget);
-               udc->driver->disconnect(udc->gadget);
-               usb_gadget_udc_stop(udc);
-       } else {
-               dev_err(dev, "unsupported command '%s'\n", buf);
-               return -EINVAL;
-       }
-
-       return n;
-}
-static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
-
-static ssize_t state_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
-       struct usb_gadget       *gadget = udc->gadget;
-
-       return sprintf(buf, "%s\n", usb_state_string(gadget->state));
-}
-static DEVICE_ATTR_RO(state);
-
-#define USB_UDC_SPEED_ATTR(name, param)                                        \
-ssize_t name##_show(struct device *dev,                                        \
-               struct device_attribute *attr, char *buf)               \
-{                                                                      \
-       struct usb_udc *udc = container_of(dev, struct usb_udc, dev);   \
-       return snprintf(buf, PAGE_SIZE, "%s\n",                         \
-                       usb_speed_string(udc->gadget->param));          \
-}                                                                      \
-static DEVICE_ATTR_RO(name)
-
-static USB_UDC_SPEED_ATTR(current_speed, speed);
-static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
-
-#define USB_UDC_ATTR(name)                                     \
-ssize_t name##_show(struct device *dev,                                \
-               struct device_attribute *attr, char *buf)       \
-{                                                              \
-       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev); \
-       struct usb_gadget       *gadget = udc->gadget;          \
-                                                               \
-       return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name);  \
-}                                                              \
-static DEVICE_ATTR_RO(name)
-
-static USB_UDC_ATTR(is_otg);
-static USB_UDC_ATTR(is_a_peripheral);
-static USB_UDC_ATTR(b_hnp_enable);
-static USB_UDC_ATTR(a_hnp_support);
-static USB_UDC_ATTR(a_alt_hnp_support);
-static USB_UDC_ATTR(is_selfpowered);
-
-static struct attribute *usb_udc_attrs[] = {
-       &dev_attr_srp.attr,
-       &dev_attr_soft_connect.attr,
-       &dev_attr_state.attr,
-       &dev_attr_current_speed.attr,
-       &dev_attr_maximum_speed.attr,
-
-       &dev_attr_is_otg.attr,
-       &dev_attr_is_a_peripheral.attr,
-       &dev_attr_b_hnp_enable.attr,
-       &dev_attr_a_hnp_support.attr,
-       &dev_attr_a_alt_hnp_support.attr,
-       &dev_attr_is_selfpowered.attr,
-       NULL,
-};
-
-static const struct attribute_group usb_udc_attr_group = {
-       .attrs = usb_udc_attrs,
-};
-
-static const struct attribute_group *usb_udc_attr_groups[] = {
-       &usb_udc_attr_group,
-       NULL,
-};
-
-static int usb_udc_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-       struct usb_udc          *udc = container_of(dev, struct usb_udc, dev);
-       int                     ret;
-
-       ret = add_uevent_var(env, "USB_UDC_NAME=%s", udc->gadget->name);
-       if (ret) {
-               dev_err(dev, "failed to add uevent USB_UDC_NAME\n");
-               return ret;
-       }
-
-       if (udc->driver) {
-               ret = add_uevent_var(env, "USB_UDC_DRIVER=%s",
-                               udc->driver->function);
-               if (ret) {
-                       dev_err(dev, "failed to add uevent USB_UDC_DRIVER\n");
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
-static int __init usb_udc_init(void)
-{
-       udc_class = class_create(THIS_MODULE, "udc");
-       if (IS_ERR(udc_class)) {
-               pr_err("failed to create udc class --> %ld\n",
-                               PTR_ERR(udc_class));
-               return PTR_ERR(udc_class);
-       }
-
-       udc_class->dev_uevent = usb_udc_uevent;
-       return 0;
-}
-subsys_initcall(usb_udc_init);
-
-static void __exit usb_udc_exit(void)
-{
-       class_destroy(udc_class);
-}
-module_exit(usb_udc_exit);
-
-MODULE_DESCRIPTION("UDC Framework");
-MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
-MODULE_LICENSE("GPL v2");
index 1cbb0ac6b18233c36535a7bd42668e623119c4c5..f8bf290f189444c5544f43d7a047981beab5dbc2 100644 (file)
@@ -2055,7 +2055,6 @@ static int xudc_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct resource *res;
        struct xusb_udc *udc;
-       struct xusb_ep *ep0;
        int irq;
        int ret;
        u32 ier;
@@ -2119,8 +2118,6 @@ static int xudc_probe(struct platform_device *pdev)
 
        xudc_eps_init(udc);
 
-       ep0 = &udc->ep[0];
-
        /* Set device address to 0.*/
        udc->write_fn(udc->addr, XUSB_ADDRESS_OFFSET, 0);
 
index d8f5674809e88e4f68d19411473ff752ce6230ef..2e710a4cca525915991bf2b2a884dd2cec844281 100644 (file)
@@ -180,7 +180,7 @@ config USB_EHCI_MXC
 config USB_EHCI_HCD_OMAP
        tristate "EHCI support for OMAP3 and later chips"
        depends on ARCH_OMAP
-       select NOP_USB_XCEIV
+       depends on NOP_USB_XCEIV
        default y
        ---help---
          Enables support for the on-chip EHCI controller on
index 1757ebb471b6d992d87d549fc6c61a7f375fcb71..6816b8c371d0cd3d9925cd78cfd70cfe4b7b812e 100644 (file)
 
 #define DRIVER_DESC "EHCI generic platform driver"
 #define EHCI_MAX_CLKS 3
+#define EHCI_MAX_RSTS 3
 #define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv)
 
 struct ehci_platform_priv {
        struct clk *clks[EHCI_MAX_CLKS];
-       struct reset_control *rst;
+       struct reset_control *rsts[EHCI_MAX_RSTS];
        struct phy **phys;
        int num_phys;
        bool reset_on_resume;
@@ -149,7 +150,7 @@ static int ehci_platform_probe(struct platform_device *dev)
        struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
        struct ehci_platform_priv *priv;
        struct ehci_hcd *ehci;
-       int err, irq, phy_num, clk = 0;
+       int err, irq, phy_num, clk = 0, rst;
 
        if (usb_disabled())
                return -ENODEV;
@@ -234,16 +235,20 @@ static int ehci_platform_probe(struct platform_device *dev)
                }
        }
 
-       priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
-       if (IS_ERR(priv->rst)) {
-               err = PTR_ERR(priv->rst);
-               if (err == -EPROBE_DEFER)
-                       goto err_put_clks;
-               priv->rst = NULL;
-       } else {
-               err = reset_control_deassert(priv->rst);
+       for (rst = 0; rst < EHCI_MAX_RSTS; rst++) {
+               priv->rsts[rst] = devm_reset_control_get_shared_by_index(
+                                       &dev->dev, rst);
+               if (IS_ERR(priv->rsts[rst])) {
+                       err = PTR_ERR(priv->rsts[rst]);
+                       if (err == -EPROBE_DEFER)
+                               goto err_reset;
+                       priv->rsts[rst] = NULL;
+                       break;
+               }
+
+               err = reset_control_deassert(priv->rsts[rst]);
                if (err)
-                       goto err_put_clks;
+                       goto err_reset;
        }
 
        if (pdata->big_endian_desc)
@@ -300,8 +305,8 @@ err_power:
        if (pdata->power_off)
                pdata->power_off(dev);
 err_reset:
-       if (priv->rst)
-               reset_control_assert(priv->rst);
+       while (--rst >= 0)
+               reset_control_assert(priv->rsts[rst]);
 err_put_clks:
        while (--clk >= 0)
                clk_put(priv->clks[clk]);
@@ -319,15 +324,15 @@ static int ehci_platform_remove(struct platform_device *dev)
        struct usb_hcd *hcd = platform_get_drvdata(dev);
        struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
        struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
-       int clk;
+       int clk, rst;
 
        usb_remove_hcd(hcd);
 
        if (pdata->power_off)
                pdata->power_off(dev);
 
-       if (priv->rst)
-               reset_control_assert(priv->rst);
+       for (rst = 0; rst < EHCI_MAX_RSTS && priv->rsts[rst]; rst++)
+               reset_control_assert(priv->rsts[rst]);
 
        for (clk = 0; clk < EHCI_MAX_CLKS && priv->clks[clk]; clk++)
                clk_put(priv->clks[clk]);
index 0449235d4f22dd2d1b068c85243e8536b5adffa7..1700908b84ef8b7c15716f07e448345f25fae392 100644 (file)
@@ -500,7 +500,6 @@ static int ohci_init (struct ohci_hcd *ohci)
 
        setup_timer(&ohci->io_watchdog, io_watchdog_func,
                        (unsigned long) ohci);
-       set_timer_slack(&ohci->io_watchdog, msecs_to_jiffies(20));
 
        ohci->hcca = dma_alloc_coherent (hcd->self.controller,
                        sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
index ae1c988da146e556f4b7fe386ad26264d0b20d90..898b74086c1292d1c63110795557155962adc1ba 100644 (file)
 
 #define DRIVER_DESC "OHCI generic platform driver"
 #define OHCI_MAX_CLKS 3
+#define OHCI_MAX_RESETS 2
 #define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv)
 
 struct ohci_platform_priv {
        struct clk *clks[OHCI_MAX_CLKS];
-       struct reset_control *rst;
+       struct reset_control *resets[OHCI_MAX_RESETS];
        struct phy **phys;
        int num_phys;
 };
@@ -117,7 +118,7 @@ static int ohci_platform_probe(struct platform_device *dev)
        struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
        struct ohci_platform_priv *priv;
        struct ohci_hcd *ohci;
-       int err, irq, phy_num, clk = 0;
+       int err, irq, phy_num, clk = 0, rst = 0;
 
        if (usb_disabled())
                return -ENODEV;
@@ -195,19 +196,21 @@ static int ohci_platform_probe(struct platform_device *dev)
                                break;
                        }
                }
-
-       }
-
-       priv->rst = devm_reset_control_get_optional(&dev->dev, NULL);
-       if (IS_ERR(priv->rst)) {
-               err = PTR_ERR(priv->rst);
-               if (err == -EPROBE_DEFER)
-                       goto err_put_clks;
-               priv->rst = NULL;
-       } else {
-               err = reset_control_deassert(priv->rst);
-               if (err)
-                       goto err_put_clks;
+               for (rst = 0; rst < OHCI_MAX_RESETS; rst++) {
+                       priv->resets[rst] =
+                               devm_reset_control_get_shared_by_index(
+                                                               &dev->dev, rst);
+                       if (IS_ERR(priv->resets[rst])) {
+                               err = PTR_ERR(priv->resets[rst]);
+                               if (err == -EPROBE_DEFER)
+                                       goto err_reset;
+                               priv->resets[rst] = NULL;
+                               break;
+                       }
+                       err = reset_control_deassert(priv->resets[rst]);
+                       if (err)
+                               goto err_reset;
+               }
        }
 
        if (pdata->big_endian_desc)
@@ -265,8 +268,8 @@ err_power:
        if (pdata->power_off)
                pdata->power_off(dev);
 err_reset:
-       if (priv->rst)
-               reset_control_assert(priv->rst);
+       while (--rst >= 0)
+               reset_control_assert(priv->resets[rst]);
 err_put_clks:
        while (--clk >= 0)
                clk_put(priv->clks[clk]);
@@ -284,15 +287,15 @@ static int ohci_platform_remove(struct platform_device *dev)
        struct usb_hcd *hcd = platform_get_drvdata(dev);
        struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
        struct ohci_platform_priv *priv = hcd_to_ohci_priv(hcd);
-       int clk;
+       int clk, rst;
 
        usb_remove_hcd(hcd);
 
        if (pdata->power_off)
                pdata->power_off(dev);
 
-       if (priv->rst)
-               reset_control_assert(priv->rst);
+       for (rst = 0; rst < OHCI_MAX_RESETS && priv->resets[rst]; rst++)
+               reset_control_assert(priv->resets[rst]);
 
        for (clk = 0; clk < OHCI_MAX_CLKS && priv->clks[clk]; clk++)
                clk_put(priv->clks[clk]);
index bad0d1f9a41d4ff9edbcb186eb86238abf878c74..6afe32381209d76cd0cf2f46d686a9f07a43f2c9 100644 (file)
@@ -37,7 +37,9 @@
  * "All components of all Command and Transfer TRBs shall be initialized to '0'"
  */
 static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
-                                       unsigned int cycle_state, gfp_t flags)
+                                              unsigned int cycle_state,
+                                              unsigned int max_packet,
+                                              gfp_t flags)
 {
        struct xhci_segment *seg;
        dma_addr_t      dma;
@@ -53,6 +55,14 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
                return NULL;
        }
 
+       if (max_packet) {
+               seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+               if (!seg->bounce_buf) {
+                       dma_pool_free(xhci->segment_pool, seg->trbs, dma);
+                       kfree(seg);
+                       return NULL;
+               }
+       }
        /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
        if (cycle_state == 0) {
                for (i = 0; i < TRBS_PER_SEGMENT; i++)
@@ -70,6 +80,7 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
                dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
                seg->trbs = NULL;
        }
+       kfree(seg->bounce_buf);
        kfree(seg);
 }
 
@@ -317,11 +328,11 @@ static void xhci_initialize_ring_info(struct xhci_ring *ring,
 static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
                struct xhci_segment **first, struct xhci_segment **last,
                unsigned int num_segs, unsigned int cycle_state,
-               enum xhci_ring_type type, gfp_t flags)
+               enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
        struct xhci_segment *prev;
 
-       prev = xhci_segment_alloc(xhci, cycle_state, flags);
+       prev = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
        if (!prev)
                return -ENOMEM;
        num_segs--;
@@ -330,7 +341,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
        while (num_segs > 0) {
                struct xhci_segment     *next;
 
-               next = xhci_segment_alloc(xhci, cycle_state, flags);
+               next = xhci_segment_alloc(xhci, cycle_state, max_packet, flags);
                if (!next) {
                        prev = *first;
                        while (prev) {
@@ -360,7 +371,7 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
  */
 static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                unsigned int num_segs, unsigned int cycle_state,
-               enum xhci_ring_type type, gfp_t flags)
+               enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
 {
        struct xhci_ring        *ring;
        int ret;
@@ -370,13 +381,15 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
                return NULL;
 
        ring->num_segs = num_segs;
+       ring->bounce_buf_len = max_packet;
        INIT_LIST_HEAD(&ring->td_list);
        ring->type = type;
        if (num_segs == 0)
                return ring;
 
        ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg,
-                       &ring->last_seg, num_segs, cycle_state, type, flags);
+                       &ring->last_seg, num_segs, cycle_state, type,
+                       max_packet, flags);
        if (ret)
                goto fail;
 
@@ -470,7 +483,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
                        ring->num_segs : num_segs_needed;
 
        ret = xhci_alloc_segments_for_ring(xhci, &first, &last,
-                       num_segs, ring->cycle_state, ring->type, flags);
+                       num_segs, ring->cycle_state, ring->type,
+                       ring->bounce_buf_len, flags);
        if (ret)
                return -ENOMEM;
 
@@ -652,7 +666,8 @@ struct xhci_ring *xhci_stream_id_to_ring(
  */
 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
                unsigned int num_stream_ctxs,
-               unsigned int num_streams, gfp_t mem_flags)
+               unsigned int num_streams,
+               unsigned int max_packet, gfp_t mem_flags)
 {
        struct xhci_stream_info *stream_info;
        u32 cur_stream;
@@ -704,9 +719,11 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
         * and add their segment DMA addresses to the radix tree.
         * Stream 0 is reserved.
         */
+
        for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
                stream_info->stream_rings[cur_stream] =
-                       xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, mem_flags);
+                       xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
+                                       mem_flags);
                cur_ring = stream_info->stream_rings[cur_stream];
                if (!cur_ring)
                        goto cleanup_rings;
@@ -1003,7 +1020,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
        }
 
        /* Allocate endpoint 0 ring */
-       dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, flags);
+       dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
        if (!dev->eps[0].ring)
                goto fail;
 
@@ -1434,22 +1451,6 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
                return -EINVAL;
 
        ring_type = usb_endpoint_type(&ep->desc);
-       /* Set up the endpoint ring */
-       virt_dev->eps[ep_index].new_ring =
-               xhci_ring_alloc(xhci, 2, 1, ring_type, mem_flags);
-       if (!virt_dev->eps[ep_index].new_ring) {
-               /* Attempt to use the ring cache */
-               if (virt_dev->num_rings_cached == 0)
-                       return -ENOMEM;
-               virt_dev->num_rings_cached--;
-               virt_dev->eps[ep_index].new_ring =
-                       virt_dev->ring_cache[virt_dev->num_rings_cached];
-               virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
-               xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
-                                       1, ring_type);
-       }
-       virt_dev->eps[ep_index].skip = false;
-       ep_ring = virt_dev->eps[ep_index].new_ring;
 
        /*
         * Get values to fill the endpoint context, mostly from ep descriptor.
@@ -1479,6 +1480,23 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
        if ((xhci->hci_version > 0x100) && HCC2_LEC(xhci->hcc_params2))
                mult = 0;
 
+       /* Set up the endpoint ring */
+       virt_dev->eps[ep_index].new_ring =
+               xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
+       if (!virt_dev->eps[ep_index].new_ring) {
+               /* Attempt to use the ring cache */
+               if (virt_dev->num_rings_cached == 0)
+                       return -ENOMEM;
+               virt_dev->num_rings_cached--;
+               virt_dev->eps[ep_index].new_ring =
+                       virt_dev->ring_cache[virt_dev->num_rings_cached];
+               virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
+               xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
+                                       1, ring_type);
+       }
+       virt_dev->eps[ep_index].skip = false;
+       ep_ring = virt_dev->eps[ep_index].new_ring;
+
        /* Fill the endpoint context */
        ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
                                      EP_INTERVAL(interval) |
@@ -2409,7 +2427,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
                goto fail;
 
        /* Set up the command ring to have one segments for now. */
-       xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
+       xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
        if (!xhci->cmd_ring)
                goto fail;
        xhci_dbg_trace(xhci, trace_xhci_dbg_init,
@@ -2454,7 +2472,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
         */
        xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
        xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
-                                               flags);
+                                       0, flags);
        if (!xhci->event_ring)
                goto fail;
        if (xhci_check_trb_in_td_math(xhci) < 0)
index 1f3f981fe7f8182b4b4b1707aa4d5f439e556fe6..ed56bf9ed885fbb3766b6a30c9ac786023d351ec 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/platform_device.h>
 #include <linux/usb/phy.h>
 #include <linux/slab.h>
-#include <linux/usb/xhci_pdriver.h>
 #include <linux/acpi.h>
 
 #include "xhci.h"
@@ -138,8 +137,6 @@ MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
 
 static int xhci_plat_probe(struct platform_device *pdev)
 {
-       struct device_node      *node = pdev->dev.of_node;
-       struct usb_xhci_pdata   *pdata = dev_get_platdata(&pdev->dev);
        const struct of_device_id *match;
        const struct hc_driver  *driver;
        struct xhci_hcd         *xhci;
@@ -202,7 +199,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
        }
 
        xhci = hcd_to_xhci(hcd);
-       match = of_match_node(usb_xhci_of_match, node);
+       match = of_match_node(usb_xhci_of_match, pdev->dev.of_node);
        if (match) {
                const struct xhci_plat_priv *priv_match = match->data;
                struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -223,8 +220,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
                goto disable_clk;
        }
 
-       if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
-                       (pdata && pdata->usb3_lpm_capable))
+       if (device_property_read_bool(&pdev->dev, "usb3-lpm-capable"))
                xhci->quirks |= XHCI_LPM_SUPPORT;
 
        if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
index d7d502578d799a59c735d8a8ce777f1f6157498d..918e0c739b795ec3577ae1853708abf6249ac9e5 100644 (file)
@@ -66,6 +66,7 @@
 
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
+#include <linux/dma-mapping.h>
 #include "xhci.h"
 #include "xhci-trace.h"
 #include "xhci-mtk.h"
@@ -88,36 +89,25 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
        return seg->dma + (segment_offset * sizeof(*trb));
 }
 
-/* Does this link TRB point to the first segment in a ring,
- * or was the previous TRB the last TRB on the last segment in the ERST?
- */
-static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
-               struct xhci_segment *seg, union xhci_trb *trb)
+static bool trb_is_link(union xhci_trb *trb)
 {
-       if (ring == xhci->event_ring)
-               return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
-                       (seg->next == xhci->event_ring->first_seg);
-       else
-               return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+       return TRB_TYPE_LINK_LE32(trb->link.control);
 }
 
-/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
- * segment?  I.e. would the updated event TRB pointer step off the end of the
- * event seg?
- */
-static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-               struct xhci_segment *seg, union xhci_trb *trb)
+static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb)
 {
-       if (ring == xhci->event_ring)
-               return trb == &seg->trbs[TRBS_PER_SEGMENT];
-       else
-               return TRB_TYPE_LINK_LE32(trb->link.control);
+       return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
 }
 
-static int enqueue_is_link_trb(struct xhci_ring *ring)
+static bool last_trb_on_ring(struct xhci_ring *ring,
+                       struct xhci_segment *seg, union xhci_trb *trb)
 {
-       struct xhci_link_trb *link = &ring->enqueue->link;
-       return TRB_TYPE_LINK_LE32(link->control);
+       return last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+}
+
+static bool link_trb_toggles_cycle(union xhci_trb *trb)
+{
+       return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
 }
 
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
@@ -129,7 +119,7 @@ static void next_trb(struct xhci_hcd *xhci,
                struct xhci_segment **seg,
                union xhci_trb **trb)
 {
-       if (last_trb(xhci, ring, *seg, *trb)) {
+       if (trb_is_link(*trb)) {
                *seg = (*seg)->next;
                *trb = ((*seg)->trbs);
        } else {
@@ -145,32 +135,29 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
        ring->deq_updates++;
 
-       /*
-        * If this is not event ring, and the dequeue pointer
-        * is not on a link TRB, there is one more usable TRB
-        */
-       if (ring->type != TYPE_EVENT &&
-                       !last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
-               ring->num_trbs_free++;
-
-       do {
-               /*
-                * Update the dequeue pointer further if that was a link TRB or
-                * we're at the end of an event ring segment (which doesn't have
-                * link TRBS)
-                */
-               if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) {
-                       if (ring->type == TYPE_EVENT &&
-                                       last_trb_on_last_seg(xhci, ring,
-                                               ring->deq_seg, ring->dequeue)) {
-                               ring->cycle_state ^= 1;
-                       }
-                       ring->deq_seg = ring->deq_seg->next;
-                       ring->dequeue = ring->deq_seg->trbs;
-               } else {
+       /* event ring doesn't have link trbs, check for last trb */
+       if (ring->type == TYPE_EVENT) {
+               if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
                        ring->dequeue++;
+                       return;
                }
-       } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue));
+               if (last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
+                       ring->cycle_state ^= 1;
+               ring->deq_seg = ring->deq_seg->next;
+               ring->dequeue = ring->deq_seg->trbs;
+               return;
+       }
+
+       /* All other rings have link trbs */
+       if (!trb_is_link(ring->dequeue)) {
+               ring->dequeue++;
+               ring->num_trbs_free++;
+       }
+       while (trb_is_link(ring->dequeue)) {
+               ring->deq_seg = ring->deq_seg->next;
+               ring->dequeue = ring->deq_seg->trbs;
+       }
+       return;
 }
 
 /*
@@ -198,50 +185,42 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 
        chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
        /* If this is not event ring, there is one less usable TRB */
-       if (ring->type != TYPE_EVENT &&
-                       !last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
+       if (!trb_is_link(ring->enqueue))
                ring->num_trbs_free--;
        next = ++(ring->enqueue);
 
        ring->enq_updates++;
-       /* Update the dequeue pointer further if that was a link TRB or we're at
-        * the end of an event ring segment (which doesn't have link TRBS)
-        */
-       while (last_trb(xhci, ring, ring->enq_seg, next)) {
-               if (ring->type != TYPE_EVENT) {
-                       /*
-                        * If the caller doesn't plan on enqueueing more
-                        * TDs before ringing the doorbell, then we
-                        * don't want to give the link TRB to the
-                        * hardware just yet.  We'll give the link TRB
-                        * back in prepare_ring() just before we enqueue
-                        * the TD at the top of the ring.
-                        */
-                       if (!chain && !more_trbs_coming)
-                               break;
+       /* Update the enqueue pointer further if that was a link TRB */
+       while (trb_is_link(next)) {
 
-                       /* If we're not dealing with 0.95 hardware or
-                        * isoc rings on AMD 0.96 host,
-                        * carry over the chain bit of the previous TRB
-                        * (which may mean the chain bit is cleared).
-                        */
-                       if (!(ring->type == TYPE_ISOC &&
-                                       (xhci->quirks & XHCI_AMD_0x96_HOST))
-                                               && !xhci_link_trb_quirk(xhci)) {
-                               next->link.control &=
-                                       cpu_to_le32(~TRB_CHAIN);
-                               next->link.control |=
-                                       cpu_to_le32(chain);
-                       }
-                       /* Give this link TRB to the hardware */
-                       wmb();
-                       next->link.control ^= cpu_to_le32(TRB_CYCLE);
+               /*
+                * If the caller doesn't plan on enqueueing more TDs before
+                * ringing the doorbell, then we don't want to give the link TRB
+                * to the hardware just yet. We'll give the link TRB back in
+                * prepare_ring() just before we enqueue the TD at the top of
+                * the ring.
+                */
+               if (!chain && !more_trbs_coming)
+                       break;
 
-                       /* Toggle the cycle bit after the last ring segment. */
-                       if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
-                               ring->cycle_state ^= 1;
-                       }
+               /* If we're not dealing with 0.95 hardware or isoc rings on
+                * AMD 0.96 host, carry over the chain bit of the previous TRB
+                * (which may mean the chain bit is cleared).
+                */
+               if (!(ring->type == TYPE_ISOC &&
+                     (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
+                   !xhci_link_trb_quirk(xhci)) {
+                       next->link.control &= cpu_to_le32(~TRB_CHAIN);
+                       next->link.control |= cpu_to_le32(chain);
                }
+               /* Give this link TRB to the hardware */
+               wmb();
+               next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+               /* Toggle the cycle bit after the last ring segment. */
+               if (link_trb_toggles_cycle(next))
+                       ring->cycle_state ^= 1;
+
                ring->enq_seg = ring->enq_seg->next;
                ring->enqueue = ring->enq_seg->trbs;
                next = ring->enqueue;
@@ -626,6 +605,31 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
        }
 }
 
+void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, struct xhci_ring *ring,
+                                struct xhci_td *td)
+{
+       struct device *dev = xhci_to_hcd(xhci)->self.controller;
+       struct xhci_segment *seg = td->bounce_seg;
+       struct urb *urb = td->urb;
+
+       if (!seg || !urb)
+               return;
+
+       if (usb_urb_dir_out(urb)) {
+               dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+                                DMA_TO_DEVICE);
+               return;
+       }
+
+       /* for in transfers we need to copy the data from bounce to sg */
+       sg_pcopy_from_buffer(urb->sg, urb->num_mapped_sgs, seg->bounce_buf,
+                            seg->bounce_len, seg->bounce_offs);
+       dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
+                        DMA_FROM_DEVICE);
+       seg->bounce_len = 0;
+       seg->bounce_offs = 0;
+}
+
 /*
  * When we get a command completion for a Stop Endpoint Command, we need to
  * unlink any cancelled TDs from the ring.  There are two ways to do that:
@@ -745,6 +749,9 @@ remove_finished_td:
                /* Doesn't matter what we pass for status, since the core will
                 * just overwrite it (because the URB has been unlinked).
                 */
+               ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
+               if (ep_ring && cur_td->bounce_seg)
+                       xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
                xhci_giveback_urb_in_irq(xhci, cur_td, 0);
 
                /* Stop processing the cancelled list if the watchdog timer is
@@ -767,6 +774,9 @@ static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
                list_del_init(&cur_td->td_list);
                if (!list_empty(&cur_td->cancelled_td_list))
                        list_del_init(&cur_td->cancelled_td_list);
+
+               if (cur_td->bounce_seg)
+                       xhci_unmap_td_bounce_buffer(xhci, ring, cur_td);
                xhci_giveback_urb_in_irq(xhci, cur_td, -ESHUTDOWN);
        }
 }
@@ -917,7 +927,7 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
         * the dequeue pointer one segment further, or we'll jump off
         * the segment into la-la-land.
         */
-       if (last_trb(xhci, ep_ring, ep_ring->deq_seg, ep_ring->dequeue)) {
+       if (trb_is_link(ep_ring->dequeue)) {
                ep_ring->deq_seg = ep_ring->deq_seg->next;
                ep_ring->dequeue = ep_ring->deq_seg->trbs;
        }
@@ -926,8 +936,7 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
                /* We have more usable TRBs */
                ep_ring->num_trbs_free++;
                ep_ring->dequeue++;
-               if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
-                               ep_ring->dequeue)) {
+               if (trb_is_link(ep_ring->dequeue)) {
                        if (ep_ring->dequeue ==
                                        dev->eps[ep_index].queued_deq_ptr)
                                break;
@@ -1865,6 +1874,10 @@ td_cleanup:
        urb = td->urb;
        urb_priv = urb->hcpriv;
 
+       /* if a bounce buffer was used to align this td then unmap it */
+       if (td->bounce_seg)
+               xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
+
        /* Do one last check of the actual transfer length.
         * If the host controller said we transferred more data than the buffer
         * length, urb->actual_length will be a very big number (since it's
@@ -2865,36 +2878,29 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
                }
        }
 
-       if (enqueue_is_link_trb(ep_ring)) {
-               struct xhci_ring *ring = ep_ring;
-               union xhci_trb *next;
-
-               next = ring->enqueue;
+       while (trb_is_link(ep_ring->enqueue)) {
+               /* If we're not dealing with 0.95 hardware or isoc rings
+                * on AMD 0.96 host, clear the chain bit.
+                */
+               if (!xhci_link_trb_quirk(xhci) &&
+                   !(ep_ring->type == TYPE_ISOC &&
+                     (xhci->quirks & XHCI_AMD_0x96_HOST)))
+                       ep_ring->enqueue->link.control &=
+                               cpu_to_le32(~TRB_CHAIN);
+               else
+                       ep_ring->enqueue->link.control |=
+                               cpu_to_le32(TRB_CHAIN);
 
-               while (last_trb(xhci, ring, ring->enq_seg, next)) {
-                       /* If we're not dealing with 0.95 hardware or isoc rings
-                        * on AMD 0.96 host, clear the chain bit.
-                        */
-                       if (!xhci_link_trb_quirk(xhci) &&
-                                       !(ring->type == TYPE_ISOC &&
-                                        (xhci->quirks & XHCI_AMD_0x96_HOST)))
-                               next->link.control &= cpu_to_le32(~TRB_CHAIN);
-                       else
-                               next->link.control |= cpu_to_le32(TRB_CHAIN);
+               wmb();
+               ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
 
-                       wmb();
-                       next->link.control ^= cpu_to_le32(TRB_CYCLE);
+               /* Toggle the cycle bit after the last ring segment. */
+               if (link_trb_toggles_cycle(ep_ring->enqueue))
+                       ep_ring->cycle_state ^= 1;
 
-                       /* Toggle the cycle bit after the last ring segment. */
-                       if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
-                               ring->cycle_state ^= 1;
-                       }
-                       ring->enq_seg = ring->enq_seg->next;
-                       ring->enqueue = ring->enq_seg->trbs;
-                       next = ring->enqueue;
-               }
+               ep_ring->enq_seg = ep_ring->enq_seg->next;
+               ep_ring->enqueue = ep_ring->enq_seg->trbs;
        }
-
        return 0;
 }
 
@@ -3092,7 +3098,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  */
 static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
                              int trb_buff_len, unsigned int td_total_len,
-                             struct urb *urb, unsigned int num_trbs_left)
+                             struct urb *urb, bool more_trbs_coming)
 {
        u32 maxp, total_packet_count;
 
@@ -3101,7 +3107,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
                return ((td_total_len - transferred) >> 10);
 
        /* One TRB with a zero-length data packet. */
-       if (num_trbs_left == 0 || (transferred == 0 && trb_buff_len == 0) ||
+       if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
            trb_buff_len == td_total_len)
                return 0;
 
@@ -3116,37 +3122,103 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
        return (total_packet_count - ((transferred + trb_buff_len) / maxp));
 }
 
+
+static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
+                        u32 *trb_buff_len, struct xhci_segment *seg)
+{
+       struct device *dev = xhci_to_hcd(xhci)->self.controller;
+       unsigned int unalign;
+       unsigned int max_pkt;
+       u32 new_buff_len;
+
+       max_pkt = GET_MAX_PACKET(usb_endpoint_maxp(&urb->ep->desc));
+       unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+       /* we got lucky, last normal TRB data on segment is packet aligned */
+       if (unalign == 0)
+               return 0;
+
+       xhci_dbg(xhci, "Unaligned %d bytes, buff len %d\n",
+                unalign, *trb_buff_len);
+
+       /* is the last normal TRB alignable by splitting it? */
+       if (*trb_buff_len > unalign) {
+               *trb_buff_len -= unalign;
+               xhci_dbg(xhci, "split align, new buff len %d\n", *trb_buff_len);
+               return 0;
+       }
+
+       /*
+        * We want enqd_len + trb_buff_len to sum up to a number aligned to
+        * number which is divisible by the endpoint's wMaxPacketSize. IOW:
+        * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+        */
+       new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+       if (new_buff_len > (urb->transfer_buffer_length - enqd_len))
+               new_buff_len = (urb->transfer_buffer_length - enqd_len);
+
+       /* create a max max_pkt sized bounce buffer pointed to by last trb */
+       if (usb_urb_dir_out(urb)) {
+               sg_pcopy_to_buffer(urb->sg, urb->num_mapped_sgs,
+                                  seg->bounce_buf, new_buff_len, enqd_len);
+               seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+                                                max_pkt, DMA_TO_DEVICE);
+       } else {
+               seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+                                                max_pkt, DMA_FROM_DEVICE);
+       }
+
+       if (dma_mapping_error(dev, seg->bounce_dma)) {
+               /* try without aligning. Some host controllers survive */
+               xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n");
+               return 0;
+       }
+       *trb_buff_len = new_buff_len;
+       seg->bounce_len = new_buff_len;
+       seg->bounce_offs = enqd_len;
+
+       xhci_dbg(xhci, "Bounce align, new buff len %d\n", *trb_buff_len);
+
+       return 1;
+}
+
 /* This is very similar to what ehci-q.c qtd_fill() does */
 int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
 {
-       struct xhci_ring *ep_ring;
+       struct xhci_ring *ring;
        struct urb_priv *urb_priv;
        struct xhci_td *td;
        struct xhci_generic_trb *start_trb;
        struct scatterlist *sg = NULL;
-       bool more_trbs_coming;
-       bool zero_length_needed;
-       unsigned int num_trbs, last_trb_num, i;
+       bool more_trbs_coming = true;
+       bool need_zero_pkt = false;
+       bool first_trb = true;
+       unsigned int num_trbs;
        unsigned int start_cycle, num_sgs = 0;
-       unsigned int running_total, block_len, trb_buff_len;
-       unsigned int full_len;
-       int ret;
+       unsigned int enqd_len, block_len, trb_buff_len, full_len;
+       int sent_len, ret;
        u32 field, length_field, remainder;
-       u64 addr;
+       u64 addr, send_addr;
 
-       ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
-       if (!ep_ring)
+       ring = xhci_urb_to_transfer_ring(xhci, urb);
+       if (!ring)
                return -EINVAL;
 
+       full_len = urb->transfer_buffer_length;
        /* If we have scatter/gather list, we use it. */
        if (urb->num_sgs) {
                num_sgs = urb->num_mapped_sgs;
                sg = urb->sg;
+               addr = (u64) sg_dma_address(sg);
+               block_len = sg_dma_len(sg);
                num_trbs = count_sg_trbs_needed(urb);
-       } else
+       } else {
                num_trbs = count_trbs_needed(urb);
-
+               addr = (u64) urb->transfer_dma;
+               block_len = full_len;
+       }
        ret = prepare_transfer(xhci, xhci->devs[slot_id],
                        ep_index, urb->stream_id,
                        num_trbs, urb, 0, mem_flags);
@@ -3155,20 +3227,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
        urb_priv = urb->hcpriv;
 
-       last_trb_num = num_trbs - 1;
-
        /* Deal with URB_ZERO_PACKET - need one more td/trb */
-       zero_length_needed = urb->transfer_flags & URB_ZERO_PACKET &&
-               urb_priv->length == 2;
-       if (zero_length_needed) {
-               num_trbs++;
-               xhci_dbg(xhci, "Creating zero length td.\n");
-               ret = prepare_transfer(xhci, xhci->devs[slot_id],
-                               ep_index, urb->stream_id,
-                               1, urb, 1, mem_flags);
-               if (unlikely(ret < 0))
-                       return ret;
-       }
+       if (urb->transfer_flags & URB_ZERO_PACKET && urb_priv->length > 1)
+               need_zero_pkt = true;
 
        td = urb_priv->td[0];
 
@@ -3177,102 +3238,97 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
         * until we've finished creating all the other TRBs.  The ring's cycle
         * state may change as we enqueue the other TRBs, so save it too.
         */
-       start_trb = &ep_ring->enqueue->generic;
-       start_cycle = ep_ring->cycle_state;
-
-       full_len = urb->transfer_buffer_length;
-       running_total = 0;
-       block_len = 0;
+       start_trb = &ring->enqueue->generic;
+       start_cycle = ring->cycle_state;
+       send_addr = addr;
 
        /* Queue the TRBs, even if they are zero-length */
-       for (i = 0; i < num_trbs; i++) {
+       for (enqd_len = 0; enqd_len < full_len; enqd_len += trb_buff_len) {
                field = TRB_TYPE(TRB_NORMAL);
 
-               if (block_len == 0) {
-                       /* A new contiguous block. */
-                       if (sg) {
-                               addr = (u64) sg_dma_address(sg);
-                               block_len = sg_dma_len(sg);
-                       } else {
-                               addr = (u64) urb->transfer_dma;
-                               block_len = full_len;
-                       }
-                       /* TRB buffer should not cross 64KB boundaries */
-                       trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
-                       trb_buff_len = min_t(unsigned int,
-                                                               trb_buff_len,
-                                                               block_len);
-               } else {
-                       /* Further through the contiguous block. */
-                       trb_buff_len = block_len;
-                       if (trb_buff_len > TRB_MAX_BUFF_SIZE)
-                               trb_buff_len = TRB_MAX_BUFF_SIZE;
-               }
+               /* TRB buffer should not cross 64KB boundaries */
+               trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+               trb_buff_len = min_t(unsigned int, trb_buff_len, block_len);
 
-               if (running_total + trb_buff_len > full_len)
-                       trb_buff_len = full_len - running_total;
+               if (enqd_len + trb_buff_len > full_len)
+                       trb_buff_len = full_len - enqd_len;
 
                /* Don't change the cycle bit of the first TRB until later */
-               if (i == 0) {
+               if (first_trb) {
+                       first_trb = false;
                        if (start_cycle == 0)
                                field |= TRB_CYCLE;
                } else
-                       field |= ep_ring->cycle_state;
+                       field |= ring->cycle_state;
 
                /* Chain all the TRBs together; clear the chain bit in the last
                 * TRB to indicate it's the last TRB in the chain.
                 */
-               if (i < last_trb_num) {
+               if (enqd_len + trb_buff_len < full_len) {
                        field |= TRB_CHAIN;
-               } else {
-                       field |= TRB_IOC;
-                       if (i == last_trb_num)
-                               td->last_trb = ep_ring->enqueue;
-                       else if (zero_length_needed) {
-                               trb_buff_len = 0;
-                               urb_priv->td[1]->last_trb = ep_ring->enqueue;
+                       if (trb_is_link(ring->enqueue + 1)) {
+                               if (xhci_align_td(xhci, urb, enqd_len,
+                                                 &trb_buff_len,
+                                                 ring->enq_seg)) {
+                                       send_addr = ring->enq_seg->bounce_dma;
+                                       /* assuming TD won't span 2 segs */
+                                       td->bounce_seg = ring->enq_seg;
+                               }
                        }
                }
+               if (enqd_len + trb_buff_len >= full_len) {
+                       field &= ~TRB_CHAIN;
+                       field |= TRB_IOC;
+                       more_trbs_coming = false;
+                       td->last_trb = ring->enqueue;
+               }
 
                /* Only set interrupt on short packet for IN endpoints */
                if (usb_urb_dir_in(urb))
                        field |= TRB_ISP;
 
                /* Set the TRB length, TD size, and interrupter fields. */
-               remainder = xhci_td_remainder(xhci, running_total,
-                                                       trb_buff_len, full_len,
-                                                       urb, num_trbs - i - 1);
+               remainder = xhci_td_remainder(xhci, enqd_len, trb_buff_len,
+                                             full_len, urb, more_trbs_coming);
 
                length_field = TRB_LEN(trb_buff_len) |
                        TRB_TD_SIZE(remainder) |
                        TRB_INTR_TARGET(0);
 
-               if (i < num_trbs - 1)
-                       more_trbs_coming = true;
-               else
-                       more_trbs_coming = false;
-               queue_trb(xhci, ep_ring, more_trbs_coming,
-                               lower_32_bits(addr),
-                               upper_32_bits(addr),
+               queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt,
+                               lower_32_bits(send_addr),
+                               upper_32_bits(send_addr),
                                length_field,
                                field);
 
-               running_total += trb_buff_len;
                addr += trb_buff_len;
-               block_len -= trb_buff_len;
-
-               if (sg) {
-                       if (block_len == 0) {
-                               /* New sg entry */
-                               --num_sgs;
-                               if (num_sgs == 0)
-                                       break;
+               sent_len = trb_buff_len;
+
+               while (sg && sent_len >= block_len) {
+                       /* New sg entry */
+                       --num_sgs;
+                       sent_len -= block_len;
+                       if (num_sgs != 0) {
                                sg = sg_next(sg);
+                               block_len = sg_dma_len(sg);
+                               addr = (u64) sg_dma_address(sg);
+                               addr += sent_len;
                        }
                }
+               block_len -= sent_len;
+               send_addr = addr;
+       }
+
+       if (need_zero_pkt) {
+               ret = prepare_transfer(xhci, xhci->devs[slot_id],
+                                      ep_index, urb->stream_id,
+                                      1, urb, 1, mem_flags);
+               urb_priv->td[1]->last_trb = ring->enqueue;
+               field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
+               queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
        }
 
-       check_trb_math(urb, running_total);
+       check_trb_math(urb, enqd_len);
        giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
                        start_cycle, start_trb);
        return 0;
@@ -3666,7 +3722,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        /* Set the TRB length, TD size, & interrupter fields. */
                        remainder = xhci_td_remainder(xhci, running_total,
                                                   trb_buff_len, td_len,
-                                                  urb, trbs_per_td - j - 1);
+                                                  urb, more_trbs_coming);
 
                        length_field = TRB_LEN(trb_buff_len) |
                                TRB_INTR_TARGET(0);
index f2f9518c53ab437548133349101983389c8de78b..01d96c9b3a75b95b93da48ce9f250722d03d9483 100644 (file)
@@ -490,8 +490,6 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
        xhci->comp_mode_recovery_timer.expires = jiffies +
                        msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
 
-       set_timer_slack(&xhci->comp_mode_recovery_timer,
-                       msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
        add_timer(&xhci->comp_mode_recovery_timer);
        xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                        "Compliance mode recovery timer initialized");
@@ -3139,6 +3137,7 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
        struct xhci_input_control_ctx *ctrl_ctx;
        unsigned int ep_index;
        unsigned int num_stream_ctxs;
+       unsigned int max_packet;
        unsigned long flags;
        u32 changed_ep_bitmask = 0;
 
@@ -3212,9 +3211,11 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
 
        for (i = 0; i < num_eps; i++) {
                ep_index = xhci_get_endpoint_index(&eps[i]->desc);
+               max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&eps[i]->desc));
                vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
                                num_stream_ctxs,
-                               num_streams, mem_flags);
+                               num_streams,
+                               max_packet, mem_flags);
                if (!vdev->eps[ep_index].stream_info)
                        goto cleanup;
                /* Set maxPstreams in endpoint context and update deq ptr to
index b0b8d0f8791ae160df627f84028e0b9bf1f4d408..b2c1dc5dc0f30f17fa2f4354537c193958ab5f78 100644 (file)
@@ -1347,6 +1347,11 @@ struct xhci_segment {
        /* private to HCD */
        struct xhci_segment     *next;
        dma_addr_t              dma;
+       /* Max packet sized bounce buffer for td-fragmant alignment */
+       dma_addr_t              bounce_dma;
+       void                    *bounce_buf;
+       unsigned int            bounce_offs;
+       unsigned int            bounce_len;
 };
 
 struct xhci_td {
@@ -1356,6 +1361,7 @@ struct xhci_td {
        struct xhci_segment     *start_seg;
        union xhci_trb          *first_trb;
        union xhci_trb          *last_trb;
+       struct xhci_segment     *bounce_seg;
        /* actual_length of the URB has already been set */
        bool                    urb_length_set;
 };
@@ -1405,6 +1411,7 @@ struct xhci_ring {
        unsigned int            num_segs;
        unsigned int            num_trbs_free;
        unsigned int            num_trbs_free_temp;
+       unsigned int            bounce_buf_len;
        enum xhci_ring_type     type;
        bool                    last_td_was_short;
        struct radix_tree_root  *trb_address_map;
@@ -1807,7 +1814,8 @@ void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
                unsigned int ep_index);
 struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
                unsigned int num_stream_ctxs,
-               unsigned int num_streams, gfp_t flags);
+               unsigned int num_streams,
+               unsigned int max_packet, gfp_t flags);
 void xhci_free_stream_info(struct xhci_hcd *xhci,
                struct xhci_stream_info *stream_info);
 void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
index ccce318f20a0af4496691f0792f839bf33762ee3..7e32ae787136002fe21491ecabfc45840d541795 100644 (file)
@@ -13,11 +13,11 @@ typedef void (*mts_scsi_cmnd_callback)(struct scsi_cmnd *);
 
 struct mts_transfer_context
 {
-       struct mts_descinstance;
+       struct mts_desc *instance;
        mts_scsi_cmnd_callback final_callback;
        struct scsi_cmnd *srb;
        
-       voiddata;
+       void *data;
        unsigned data_length;
        int data_pipe;
        int fragment;
@@ -38,7 +38,7 @@ struct mts_desc {
        u8 ep_response;
        u8 ep_image;
 
-       struct Scsi_Host * host;
+       struct Scsi_Host *host;
 
        struct urb *urb;
        struct mts_transfer_context context;
index 15666ad7c772fe5a9b960e8e37f5bf93a150bcd1..02abfcdfbf7bc887b4b331a5da9066d5db8c152a 100644 (file)
@@ -1285,18 +1285,22 @@ int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 *data)
 }
 
 int sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
-               u32 dest, int length, size_t *bytes_written)
+               u32 dest, int length)
 {
+       size_t dummy;
+
        return sisusb_write_mem_bulk(sisusb, dest, src, length,
-                       NULL, 0, bytes_written);
+                       NULL, 0, &dummy);
 }
 
 #ifdef SISUSBENDIANTEST
-int sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest,
-               u32 src, int length, size_t *bytes_written)
+static int sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest,
+               u32 src, int length)
 {
+       size_t dummy;
+
        return sisusb_read_mem_bulk(sisusb, src, dest, length,
-                       NULL, bytes_written);
+                       NULL, &dummy);
 }
 #endif
 #endif
@@ -1306,16 +1310,14 @@ static void sisusb_testreadwrite(struct sisusb_usb_data *sisusb)
 {
        static char srcbuffer[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
        char destbuffer[10];
-       size_t dummy;
        int i, j;
 
-       sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7, &dummy);
+       sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7);
 
        for (i = 1; i <= 7; i++) {
                dev_dbg(&sisusb->sisusb_dev->dev,
                                "sisusb: rwtest %d bytes\n", i);
-               sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase,
-                               i, &dummy);
+               sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase, i);
                for (j = 0; j < i; j++) {
                        dev_dbg(&sisusb->sisusb_dev->dev,
                                        "rwtest read[%d] = %x\n",
@@ -2276,7 +2278,6 @@ int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
        const struct font_desc *myfont;
        u8 *tempbuf;
        u16 *tempbufb;
-       size_t written;
        static const char bootstring[] =
                "SiSUSB VGA text console, (C) 2005 Thomas Winischhofer.";
        static const char bootlogo[] = "(o_ //\\ V_/_";
@@ -2343,18 +2344,15 @@ int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
                                *(tempbufb++) = 0x0700 | bootstring[i++];
 
                        ret |= sisusb_copy_memory(sisusb, tempbuf,
-                                       sisusb->vrambase, 8192, &written);
+                                       sisusb->vrambase, 8192);
 
                        vfree(tempbuf);
 
                }
 
        } else if (sisusb->scrbuf) {
-
                ret |= sisusb_copy_memory(sisusb, (char *)sisusb->scrbuf,
-                               sisusb->vrambase, sisusb->scrbuf_size,
-                               &written);
-
+                               sisusb->vrambase, sisusb->scrbuf_size);
        }
 
        if (sisusb->sisusb_cursor_size_from >= 0 &&
index afa853209f1d599d73e1b939ae0bdfee244ff7c0..460cebf322e39109bb86ca4cb5eac81014ae101e 100644 (file)
@@ -370,7 +370,6 @@ static void
 sisusbcon_putc(struct vc_data *c, int ch, int y, int x)
 {
        struct sisusb_usb_data *sisusb;
-       ssize_t written;
 
        sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
        if (!sisusb)
@@ -384,7 +383,7 @@ sisusbcon_putc(struct vc_data *c, int ch, int y, int x)
 
 
        sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
-                               (long)SISUSB_HADDR(x, y), 2, &written);
+                               (long)SISUSB_HADDR(x, y), 2);
 
        mutex_unlock(&sisusb->lock);
 }
@@ -395,7 +394,6 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s,
                         int count, int y, int x)
 {
        struct sisusb_usb_data *sisusb;
-       ssize_t written;
        u16 *dest;
        int i;
 
@@ -420,7 +418,7 @@ sisusbcon_putcs(struct vc_data *c, const unsigned short *s,
        }
 
        sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(x, y),
-                               (long)SISUSB_HADDR(x, y), count * 2, &written);
+                               (long)SISUSB_HADDR(x, y), count * 2);
 
        mutex_unlock(&sisusb->lock);
 }
@@ -431,7 +429,6 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width)
 {
        struct sisusb_usb_data *sisusb;
        u16 eattr = c->vc_video_erase_char;
-       ssize_t written;
        int i, length, cols;
        u16 *dest;
 
@@ -475,41 +472,7 @@ sisusbcon_clear(struct vc_data *c, int y, int x, int height, int width)
 
 
        sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(x, y),
-                               (long)SISUSB_HADDR(x, y), length, &written);
-
-       mutex_unlock(&sisusb->lock);
-}
-
-/* Interface routine */
-static void
-sisusbcon_bmove(struct vc_data *c, int sy, int sx,
-                        int dy, int dx, int height, int width)
-{
-       struct sisusb_usb_data *sisusb;
-       ssize_t written;
-       int cols, length;
-
-       if (width <= 0 || height <= 0)
-               return;
-
-       sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
-       if (!sisusb)
-               return;
-
-       /* sisusb->lock is down */
-
-       cols = sisusb->sisusb_num_columns;
-
-       if (sisusb_is_inactive(c, sisusb)) {
-               mutex_unlock(&sisusb->lock);
-               return;
-       }
-
-       length = ((height * cols) - dx - (cols - width - dx)) * 2;
-
-
-       sisusb_copy_memory(sisusb, (unsigned char *)SISUSB_VADDR(dx, dy),
-                               (long)SISUSB_HADDR(dx, dy), length, &written);
+                               (long)SISUSB_HADDR(x, y), length);
 
        mutex_unlock(&sisusb->lock);
 }
@@ -519,7 +482,6 @@ static int
 sisusbcon_switch(struct vc_data *c)
 {
        struct sisusb_usb_data *sisusb;
-       ssize_t written;
        int length;
 
        /* Returnvalue 0 means we have fully restored screen,
@@ -559,7 +521,7 @@ sisusbcon_switch(struct vc_data *c)
 
        sisusb_copy_memory(sisusb, (unsigned char *)c->vc_origin,
                                (long)SISUSB_HADDR(0, 0),
-                               length, &written);
+                               length);
 
        mutex_unlock(&sisusb->lock);
 
@@ -600,7 +562,7 @@ sisusbcon_save_screen(struct vc_data *c)
 }
 
 /* interface routine */
-static int
+static void
 sisusbcon_set_palette(struct vc_data *c, const unsigned char *table)
 {
        struct sisusb_usb_data *sisusb;
@@ -608,18 +570,18 @@ sisusbcon_set_palette(struct vc_data *c, const unsigned char *table)
 
        /* Return value not used by vt */
 
-       if (!CON_IS_VISIBLE(c))
-               return -EINVAL;
+       if (!con_is_visible(c))
+               return;
 
        sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
        if (!sisusb)
-               return -EINVAL;
+               return;
 
        /* sisusb->lock is down */
 
        if (sisusb_is_inactive(c, sisusb)) {
                mutex_unlock(&sisusb->lock);
-               return -EINVAL;
+               return;
        }
 
        for (i = j = 0; i < 16; i++) {
@@ -634,8 +596,6 @@ sisusbcon_set_palette(struct vc_data *c, const unsigned char *table)
        }
 
        mutex_unlock(&sisusb->lock);
-
-       return 0;
 }
 
 /* interface routine */
@@ -644,7 +604,6 @@ sisusbcon_blank(struct vc_data *c, int blank, int mode_switch)
 {
        struct sisusb_usb_data *sisusb;
        u8 sr1, cr17, pmreg, cr63;
-       ssize_t written;
        int ret = 0;
 
        sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
@@ -672,7 +631,7 @@ sisusbcon_blank(struct vc_data *c, int blank, int mode_switch)
                                (unsigned char *)c->vc_origin,
                                (u32)(sisusb->vrambase +
                                        (c->vc_origin - sisusb->scrbuf)),
-                               c->vc_screenbuf_size, &written);
+                               c->vc_screenbuf_size);
                sisusb->con_blanked = 1;
                ret = 1;
                break;
@@ -723,24 +682,22 @@ sisusbcon_blank(struct vc_data *c, int blank, int mode_switch)
 }
 
 /* interface routine */
-static int
+static void
 sisusbcon_scrolldelta(struct vc_data *c, int lines)
 {
        struct sisusb_usb_data *sisusb;
        int margin = c->vc_size_row * 4;
        int ul, we, p, st;
 
-       /* The return value does not seem to be used */
-
        sisusb = sisusb_get_sisusb_lock_and_check(c->vc_num);
        if (!sisusb)
-               return 0;
+               return;
 
        /* sisusb->lock is down */
 
        if (sisusb_is_inactive(c, sisusb)) {
                mutex_unlock(&sisusb->lock);
-               return 0;
+               return;
        }
 
        if (!lines)             /* Turn scrollback off */
@@ -780,8 +737,6 @@ sisusbcon_scrolldelta(struct vc_data *c, int lines)
        sisusbcon_set_start_address(sisusb, c);
 
        mutex_unlock(&sisusb->lock);
-
-       return 1;
 }
 
 /* Interface routine */
@@ -860,7 +815,6 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
        int cols = sisusb->sisusb_num_columns;
        int length = ((b - t) * cols) * 2;
        u16 eattr = c->vc_video_erase_char;
-       ssize_t written;
 
        /* sisusb->lock is down */
 
@@ -890,7 +844,7 @@ sisusbcon_scroll_area(struct vc_data *c, struct sisusb_usb_data *sisusb,
        }
 
        sisusb_copy_memory(sisusb, (char *)SISUSB_VADDR(0, t),
-                               (long)SISUSB_HADDR(0, t), length, &written);
+                               (long)SISUSB_HADDR(0, t), length);
 
        mutex_unlock(&sisusb->lock);
 
@@ -903,7 +857,6 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
 {
        struct sisusb_usb_data *sisusb;
        u16 eattr = c->vc_video_erase_char;
-       ssize_t written;
        int copyall = 0;
        unsigned long oldorigin;
        unsigned int delta = lines * c->vc_size_row;
@@ -996,18 +949,18 @@ sisusbcon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
                sisusb_copy_memory(sisusb,
                        (char *)c->vc_origin,
                        (u32)(sisusb->vrambase + originoffset),
-                       c->vc_screenbuf_size, &written);
+                       c->vc_screenbuf_size);
        else if (dir == SM_UP)
                sisusb_copy_memory(sisusb,
                        (char *)c->vc_origin + c->vc_screenbuf_size - delta,
                        (u32)sisusb->vrambase + originoffset +
                                        c->vc_screenbuf_size - delta,
-                       delta, &written);
+                       delta);
        else
                sisusb_copy_memory(sisusb,
                        (char *)c->vc_origin,
                        (u32)(sisusb->vrambase + originoffset),
-                       delta, &written);
+                       delta);
 
        c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
        c->vc_visible_origin = c->vc_origin;
@@ -1273,7 +1226,7 @@ sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
                        struct vc_data *vc = vc_cons[i].d;
 
                        if (vc && vc->vc_sw == &sisusb_con) {
-                               if (CON_IS_VISIBLE(vc)) {
+                               if (con_is_visible(vc)) {
                                        vc->vc_sw->con_cursor(vc, CM_DRAW);
                                }
                                vc->vc_font.height = fh;
@@ -1385,7 +1338,6 @@ static const struct consw sisusb_con = {
        .con_putcs =            sisusbcon_putcs,
        .con_cursor =           sisusbcon_cursor,
        .con_scroll =           sisusbcon_scroll,
-       .con_bmove =            sisusbcon_bmove,
        .con_switch =           sisusbcon_switch,
        .con_blank =            sisusbcon_blank,
        .con_font_set =         sisusbcon_font_set,
@@ -1433,15 +1385,12 @@ static const struct consw sisusb_dummy_con = {
        .con_putcs =            SISUSBCONDUMMY,
        .con_cursor =           SISUSBCONDUMMY,
        .con_scroll =           SISUSBCONDUMMY,
-       .con_bmove =            SISUSBCONDUMMY,
        .con_switch =           SISUSBCONDUMMY,
        .con_blank =            SISUSBCONDUMMY,
        .con_font_set =         SISUSBCONDUMMY,
        .con_font_get =         SISUSBCONDUMMY,
        .con_font_default =     SISUSBCONDUMMY,
        .con_font_copy =        SISUSBCONDUMMY,
-       .con_set_palette =      SISUSBCONDUMMY,
-       .con_scrolldelta =      SISUSBCONDUMMY,
 };
 
 int
index c46ce42d44892cb895f5d7f3a8d49a6083aa58f2..e79a616f0d26ca6d33db4d9504dff49d45aaad33 100644 (file)
@@ -828,7 +828,7 @@ void sisusb_delete(struct kref *kref);
 int sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data);
 int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 * data);
 int sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
-                      u32 dest, int length, size_t * bytes_written);
+                      u32 dest, int length);
 int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init);
 int sisusbcon_do_font_op(struct sisusb_usb_data *sisusb, int set, int slot,
                         u8 * arg, int cmapsz, int ch512, int dorecalc,
index b45cb77c0744599a2ae7f77a12f3e014a59cba7a..8e7737d7ac0a3e0bacdc6b2a2b67e18f9385a51e 100644 (file)
@@ -330,6 +330,17 @@ static int usb3503_i2c_probe(struct i2c_client *i2c,
        return usb3503_probe(hub);
 }
 
+static int usb3503_i2c_remove(struct i2c_client *i2c)
+{
+       struct usb3503 *hub;
+
+       hub = i2c_get_clientdata(i2c);
+       if (hub->clk)
+               clk_disable_unprepare(hub->clk);
+
+       return 0;
+}
+
 static int usb3503_platform_probe(struct platform_device *pdev)
 {
        struct usb3503 *hub;
@@ -338,10 +349,22 @@ static int usb3503_platform_probe(struct platform_device *pdev)
        if (!hub)
                return -ENOMEM;
        hub->dev = &pdev->dev;
+       platform_set_drvdata(pdev, hub);
 
        return usb3503_probe(hub);
 }
 
+static int usb3503_platform_remove(struct platform_device *pdev)
+{
+       struct usb3503 *hub;
+
+       hub = platform_get_drvdata(pdev);
+       if (hub->clk)
+               clk_disable_unprepare(hub->clk);
+
+       return 0;
+}
+
 #ifdef CONFIG_PM_SLEEP
 static int usb3503_i2c_suspend(struct device *dev)
 {
@@ -395,6 +418,7 @@ static struct i2c_driver usb3503_i2c_driver = {
                .of_match_table = of_match_ptr(usb3503_of_match),
        },
        .probe          = usb3503_i2c_probe,
+       .remove         = usb3503_i2c_remove,
        .id_table       = usb3503_id,
 };
 
@@ -404,6 +428,7 @@ static struct platform_driver usb3503_platform_driver = {
                .of_match_table = of_match_ptr(usb3503_of_match),
        },
        .probe          = usb3503_platform_probe,
+       .remove         = usb3503_platform_remove,
 };
 
 static int __init usb3503_init(void)
index f95befe18cc1907f86aaf3e78fb366062bf4be7a..689d42aba8a96d36cff8e93d122644c77ff0d69b 100644 (file)
@@ -2,9 +2,12 @@
 # for USB OTG silicon based on Mentor Graphics INVENTRA designs
 #
 
+# define_trace.h needs to know how to find our header
+CFLAGS_musb_trace.o    := -I$(src)
+
 obj-$(CONFIG_USB_MUSB_HDRC) += musb_hdrc.o
 
-musb_hdrc-y := musb_core.o
+musb_hdrc-y := musb_core.o musb_trace.o
 
 musb_hdrc-$(CONFIG_USB_MUSB_HOST)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_virthub.o musb_host.o
 musb_hdrc-$(CONFIG_USB_MUSB_GADGET)$(CONFIG_USB_MUSB_DUAL_ROLE) += musb_gadget_ep0.o musb_gadget.o
index cc134109b056d1e210bb18a6f30e82abe7051529..1ae48e64e975867a6eb03afffa92260319595b06 100644 (file)
@@ -14,6 +14,7 @@
 #include "musb_core.h"
 #include "musb_debug.h"
 #include "cppi_dma.h"
+#include "davinci.h"
 
 
 /* CPPI DMA status 7-mar-2006:
@@ -232,7 +233,7 @@ static void cppi_controller_stop(struct cppi *controller)
        musb_writel(tibase, DAVINCI_RXCPPI_INTCLR_REG,
                        DAVINCI_DMA_ALL_CHANNELS_ENABLE);
 
-       dev_dbg(musb->controller, "Tearing down RX and TX Channels\n");
+       musb_dbg(musb, "Tearing down RX and TX Channels");
        for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
                /* FIXME restructure of txdma to use bds like rxdma */
                controller->tx[i].last_processed = NULL;
@@ -297,13 +298,13 @@ cppi_channel_allocate(struct dma_controller *c,
         */
        if (transmit) {
                if (index >= ARRAY_SIZE(controller->tx)) {
-                       dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'T', index);
+                       musb_dbg(musb, "no %cX%d CPPI channel", 'T', index);
                        return NULL;
                }
                cppi_ch = controller->tx + index;
        } else {
                if (index >= ARRAY_SIZE(controller->rx)) {
-                       dev_dbg(musb->controller, "no %cX%d CPPI channel\n", 'R', index);
+                       musb_dbg(musb, "no %cX%d CPPI channel", 'R', index);
                        return NULL;
                }
                cppi_ch = controller->rx + index;
@@ -314,13 +315,13 @@ cppi_channel_allocate(struct dma_controller *c,
         * with the other DMA engine too
         */
        if (cppi_ch->hw_ep)
-               dev_dbg(musb->controller, "re-allocating DMA%d %cX channel %p\n",
+               musb_dbg(musb, "re-allocating DMA%d %cX channel %p",
                                index, transmit ? 'T' : 'R', cppi_ch);
        cppi_ch->hw_ep = ep;
        cppi_ch->channel.status = MUSB_DMA_STATUS_FREE;
        cppi_ch->channel.max_len = 0x7fffffff;
 
-       dev_dbg(musb->controller, "Allocate CPPI%d %cX\n", index, transmit ? 'T' : 'R');
+       musb_dbg(musb, "Allocate CPPI%d %cX", index, transmit ? 'T' : 'R');
        return &cppi_ch->channel;
 }
 
@@ -335,8 +336,8 @@ static void cppi_channel_release(struct dma_channel *channel)
        c = container_of(channel, struct cppi_channel, channel);
        tibase = c->controller->tibase;
        if (!c->hw_ep)
-               dev_dbg(c->controller->musb->controller,
-                       "releasing idle DMA channel %p\n", c);
+               musb_dbg(c->controller->musb,
+                       "releasing idle DMA channel %p", c);
        else if (!c->transmit)
                core_rxirq_enable(tibase, c->index + 1);
 
@@ -354,11 +355,10 @@ cppi_dump_rx(int level, struct cppi_channel *c, const char *tag)
 
        musb_ep_select(base, c->index + 1);
 
-       dev_dbg(c->controller->musb->controller,
+       musb_dbg(c->controller->musb,
                "RX DMA%d%s: %d left, csr %04x, "
                "%08x H%08x S%08x C%08x, "
-               "B%08x L%08x %08x .. %08x"
-               "\n",
+               "B%08x L%08x %08x .. %08x",
                c->index, tag,
                musb_readl(c->controller->tibase,
                        DAVINCI_RXCPPI_BUFCNT0_REG + 4 * c->index),
@@ -385,11 +385,10 @@ cppi_dump_tx(int level, struct cppi_channel *c, const char *tag)
 
        musb_ep_select(base, c->index + 1);
 
-       dev_dbg(c->controller->musb->controller,
+       musb_dbg(c->controller->musb,
                "TX DMA%d%s: csr %04x, "
                "H%08x S%08x C%08x %08x, "
-               "F%08x L%08x .. %08x"
-               "\n",
+               "F%08x L%08x .. %08x",
                c->index, tag,
                musb_readw(c->hw_ep->regs, MUSB_TXCSR),
 
@@ -590,7 +589,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
                length = min(n_bds * maxpacket, length);
        }
 
-       dev_dbg(musb->controller, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u\n",
+       musb_dbg(musb, "TX DMA%d, pktSz %d %s bds %d dma 0x%llx len %u",
                        tx->index,
                        maxpacket,
                        rndis ? "rndis" : "transparent",
@@ -647,7 +646,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
                                bd->hw_options |= CPPI_ZERO_SET;
                }
 
-               dev_dbg(musb->controller, "TXBD %p: nxt %08x buf %08x len %04x opt %08x\n",
+               musb_dbg(musb, "TXBD %p: nxt %08x buf %08x len %04x opt %08x",
                                bd, bd->hw_next, bd->hw_bufp,
                                bd->hw_off_len, bd->hw_options);
 
@@ -813,8 +812,8 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
 
        length = min(n_bds * maxpacket, length);
 
-       dev_dbg(musb->controller, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
-                       "dma 0x%llx len %u %u/%u\n",
+       musb_dbg(musb, "RX DMA%d seg, maxp %d %s bds %d (cnt %d) "
+                       "dma 0x%llx len %u %u/%u",
                        rx->index, maxpacket,
                        onepacket
                                ? (is_rndis ? "rndis" : "onepacket")
@@ -924,7 +923,7 @@ cppi_next_rx_segment(struct musb *musb, struct cppi_channel *rx, int onepacket)
                        DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4))
                        & 0xffff;
        if (i < (2 + n_bds)) {
-               dev_dbg(musb->controller, "bufcnt%d underrun - %d (for %d)\n",
+               musb_dbg(musb, "bufcnt%d underrun - %d (for %d)",
                                        rx->index, i, n_bds);
                musb_writel(tibase,
                        DAVINCI_RXCPPI_BUFCNT0_REG + (rx->index * 4),
@@ -973,7 +972,7 @@ static int cppi_channel_program(struct dma_channel *ch,
                /* WARN_ON(1); */
                break;
        case MUSB_DMA_STATUS_UNKNOWN:
-               dev_dbg(musb->controller, "%cX DMA%d not allocated!\n",
+               musb_dbg(musb, "%cX DMA%d not allocated!",
                                cppi_ch->transmit ? 'T' : 'R',
                                cppi_ch->index);
                /* FALLTHROUGH */
@@ -1029,8 +1028,8 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
                if (!completed && (bd->hw_options & CPPI_OWN_SET))
                        break;
 
-               dev_dbg(musb->controller, "C/RXBD %llx: nxt %08x buf %08x "
-                       "off.len %08x opt.len %08x (%d)\n",
+               musb_dbg(musb, "C/RXBD %llx: nxt %08x buf %08x "
+                       "off.len %08x opt.len %08x (%d)",
                        (unsigned long long)bd->dma, bd->hw_next, bd->hw_bufp,
                        bd->hw_off_len, bd->hw_options,
                        rx->channel.actual_len);
@@ -1051,7 +1050,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
                         * CPPI ignores those BDs even though OWN is still set.
                         */
                        completed = true;
-                       dev_dbg(musb->controller, "rx short %d/%d (%d)\n",
+                       musb_dbg(musb, "rx short %d/%d (%d)",
                                        len, bd->buflen,
                                        rx->channel.actual_len);
                }
@@ -1101,7 +1100,7 @@ static bool cppi_rx_scan(struct cppi *cppi, unsigned ch)
                musb_ep_select(cppi->mregs, rx->index + 1);
                csr = musb_readw(regs, MUSB_RXCSR);
                if (csr & MUSB_RXCSR_DMAENAB) {
-                       dev_dbg(musb->controller, "list%d %p/%p, last %llx%s, csr %04x\n",
+                       musb_dbg(musb, "list%d %p/%p, last %llx%s, csr %04x",
                                rx->index,
                                rx->head, rx->tail,
                                rx->last_processed
@@ -1164,7 +1163,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
                return IRQ_NONE;
        }
 
-       dev_dbg(musb->controller, "CPPI IRQ Tx%x Rx%x\n", tx, rx);
+       musb_dbg(musb, "CPPI IRQ Tx%x Rx%x", tx, rx);
 
        /* process TX channels */
        for (index = 0; tx; tx = tx >> 1, index++) {
@@ -1192,7 +1191,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
                 * that needs to be acknowledged.
                 */
                if (NULL == bd) {
-                       dev_dbg(musb->controller, "null BD\n");
+                       musb_dbg(musb, "null BD");
                        musb_writel(&tx_ram->tx_complete, 0, 0);
                        continue;
                }
@@ -1207,7 +1206,7 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
                        if (bd->hw_options & CPPI_OWN_SET)
                                break;
 
-                       dev_dbg(musb->controller, "C/TXBD %p n %x b %x off %x opt %x\n",
+                       musb_dbg(musb, "C/TXBD %p n %x b %x off %x opt %x",
                                        bd, bd->hw_next, bd->hw_bufp,
                                        bd->hw_off_len, bd->hw_options);
 
index 59bf949e589b85a086be064ffb037a77bedc7e24..7fdfb71a8f098357de798c2937724e7beebb4d71 100644 (file)
@@ -7,17 +7,10 @@
 #include <linux/list.h>
 #include <linux/errno.h>
 #include <linux/dmapool.h>
+#include <linux/dmaengine.h>
 
-#include "musb_dma.h"
 #include "musb_core.h"
-
-
-/* FIXME fully isolate CPPI from DaVinci ... the "CPPI generic" registers
- * would seem to be shared with the TUSB6020 (over VLYNQ).
- */
-
-#include "davinci.h"
-
+#include "musb_dma.h"
 
 /* CPPI RX/TX state RAM */
 
@@ -131,4 +124,24 @@ struct cppi {
 /* CPPI IRQ handler */
 extern irqreturn_t cppi_interrupt(int, void *);
 
+struct cppi41_dma_channel {
+       struct dma_channel channel;
+       struct cppi41_dma_controller *controller;
+       struct musb_hw_ep *hw_ep;
+       struct dma_chan *dc;
+       dma_cookie_t cookie;
+       u8 port_num;
+       u8 is_tx;
+       u8 is_allocated;
+       u8 usb_toggle;
+
+       dma_addr_t buf_addr;
+       u32 total_len;
+       u32 prog_len;
+       u32 transferred;
+       u32 packet_sz;
+       struct list_head tx_check;
+       int tx_zlp;
+};
+
 #endif                         /* end of ifndef _CPPI_DMA_H_ */
index f824336def5c558de1362f9dead14880ba2fb88f..74fc3069cb42c2aab7f7d173cf6f42c73c0fb133 100644 (file)
 #include <linux/usb.h>
 
 #include "musb_core.h"
+#include "musb_trace.h"
 
 #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
 
@@ -258,31 +259,43 @@ static u32 musb_default_busctl_offset(u8 epnum, u16 offset)
 
 static u8 musb_default_readb(const void __iomem *addr, unsigned offset)
 {
-       return __raw_readb(addr + offset);
+       u8 data =  __raw_readb(addr + offset);
+
+       trace_musb_readb(__builtin_return_address(0), addr, offset, data);
+       return data;
 }
 
 static void musb_default_writeb(void __iomem *addr, unsigned offset, u8 data)
 {
+       trace_musb_writeb(__builtin_return_address(0), addr, offset, data);
        __raw_writeb(data, addr + offset);
 }
 
 static u16 musb_default_readw(const void __iomem *addr, unsigned offset)
 {
-       return __raw_readw(addr + offset);
+       u16 data = __raw_readw(addr + offset);
+
+       trace_musb_readw(__builtin_return_address(0), addr, offset, data);
+       return data;
 }
 
 static void musb_default_writew(void __iomem *addr, unsigned offset, u16 data)
 {
+       trace_musb_writew(__builtin_return_address(0), addr, offset, data);
        __raw_writew(data, addr + offset);
 }
 
 static u32 musb_default_readl(const void __iomem *addr, unsigned offset)
 {
-       return __raw_readl(addr + offset);
+       u32 data = __raw_readl(addr + offset);
+
+       trace_musb_readl(__builtin_return_address(0), addr, offset, data);
+       return data;
 }
 
 static void musb_default_writel(void __iomem *addr, unsigned offset, u32 data)
 {
+       trace_musb_writel(__builtin_return_address(0), addr, offset, data);
        __raw_writel(data, addr + offset);
 }
 
@@ -461,20 +474,21 @@ static void musb_otg_timer_func(unsigned long data)
        spin_lock_irqsave(&musb->lock, flags);
        switch (musb->xceiv->otg->state) {
        case OTG_STATE_B_WAIT_ACON:
-               dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n");
+               musb_dbg(musb,
+                       "HNP: b_wait_acon timeout; back to b_peripheral");
                musb_g_disconnect(musb);
                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
                musb->is_active = 0;
                break;
        case OTG_STATE_A_SUSPEND:
        case OTG_STATE_A_WAIT_BCON:
-               dev_dbg(musb->controller, "HNP: %s timeout\n",
+               musb_dbg(musb, "HNP: %s timeout",
                        usb_otg_state_string(musb->xceiv->otg->state));
                musb_platform_set_vbus(musb, 0);
                musb->xceiv->otg->state = OTG_STATE_A_WAIT_VFALL;
                break;
        default:
-               dev_dbg(musb->controller, "HNP: Unhandled mode %s\n",
+               musb_dbg(musb, "HNP: Unhandled mode %s",
                        usb_otg_state_string(musb->xceiv->otg->state));
        }
        spin_unlock_irqrestore(&musb->lock, flags);
@@ -489,17 +503,17 @@ void musb_hnp_stop(struct musb *musb)
        void __iomem    *mbase = musb->mregs;
        u8      reg;
 
-       dev_dbg(musb->controller, "HNP: stop from %s\n",
+       musb_dbg(musb, "HNP: stop from %s",
                        usb_otg_state_string(musb->xceiv->otg->state));
 
        switch (musb->xceiv->otg->state) {
        case OTG_STATE_A_PERIPHERAL:
                musb_g_disconnect(musb);
-               dev_dbg(musb->controller, "HNP: back to %s\n",
+               musb_dbg(musb, "HNP: back to %s",
                        usb_otg_state_string(musb->xceiv->otg->state));
                break;
        case OTG_STATE_B_HOST:
-               dev_dbg(musb->controller, "HNP: Disabling HR\n");
+               musb_dbg(musb, "HNP: Disabling HR");
                if (hcd)
                        hcd->self.is_b_host = 0;
                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -510,7 +524,7 @@ void musb_hnp_stop(struct musb *musb)
                /* REVISIT: Start SESSION_REQUEST here? */
                break;
        default:
-               dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n",
+               musb_dbg(musb, "HNP: Stopping in unknown state %s",
                        usb_otg_state_string(musb->xceiv->otg->state));
        }
 
@@ -541,8 +555,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
 {
        irqreturn_t handled = IRQ_NONE;
 
-       dev_dbg(musb->controller, "<== DevCtl=%02x, int_usb=0x%x\n", devctl,
-               int_usb);
+       musb_dbg(musb, "<== DevCtl=%02x, int_usb=0x%x", devctl, int_usb);
 
        /* in host mode, the peripheral may issue remote wakeup.
         * in peripheral mode, the host may resume the link.
@@ -550,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
         */
        if (int_usb & MUSB_INTR_RESUME) {
                handled = IRQ_HANDLED;
-               dev_dbg(musb->controller, "RESUME (%s)\n",
+               musb_dbg(musb, "RESUME (%s)",
                                usb_otg_state_string(musb->xceiv->otg->state));
 
                if (devctl & MUSB_DEVCTL_HM) {
@@ -619,11 +632,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
 
                if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
                                && (devctl & MUSB_DEVCTL_BDEVICE)) {
-                       dev_dbg(musb->controller, "SessReq while on B state\n");
+                       musb_dbg(musb, "SessReq while on B state");
                        return IRQ_HANDLED;
                }
 
-               dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n",
+               musb_dbg(musb, "SESSION_REQUEST (%s)",
                        usb_otg_state_string(musb->xceiv->otg->state));
 
                /* IRQ arrives from ID pin sense or (later, if VBUS power
@@ -714,7 +727,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
        }
 
        if (int_usb & MUSB_INTR_SUSPEND) {
-               dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x\n",
+               musb_dbg(musb, "SUSPEND (%s) devctl %02x",
                        usb_otg_state_string(musb->xceiv->otg->state), devctl);
                handled = IRQ_HANDLED;
 
@@ -743,7 +756,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                        musb->is_active = musb->g.b_hnp_enable;
                        if (musb->is_active) {
                                musb->xceiv->otg->state = OTG_STATE_B_WAIT_ACON;
-                               dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
+                               musb_dbg(musb, "HNP: Setting timer for b_ase0_brst");
                                mod_timer(&musb->otg_timer, jiffies
                                        + msecs_to_jiffies(
                                                        OTG_TIME_B_ASE0_BRST));
@@ -760,7 +773,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                        break;
                case OTG_STATE_B_HOST:
                        /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
-                       dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n");
+                       musb_dbg(musb, "REVISIT: SUSPEND as B_HOST");
                        break;
                default:
                        /* "should not happen" */
@@ -797,14 +810,14 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
                switch (musb->xceiv->otg->state) {
                case OTG_STATE_B_PERIPHERAL:
                        if (int_usb & MUSB_INTR_SUSPEND) {
-                               dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
+                               musb_dbg(musb, "HNP: SUSPEND+CONNECT, now b_host");
                                int_usb &= ~MUSB_INTR_SUSPEND;
                                goto b_host;
                        } else
-                               dev_dbg(musb->controller, "CONNECT as b_peripheral???\n");
+                               musb_dbg(musb, "CONNECT as b_peripheral???");
                        break;
                case OTG_STATE_B_WAIT_ACON:
-                       dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n");
+                       musb_dbg(musb, "HNP: CONNECT, now b_host");
 b_host:
                        musb->xceiv->otg->state = OTG_STATE_B_HOST;
                        if (musb->hcd)
@@ -823,12 +836,12 @@ b_host:
 
                musb_host_poke_root_hub(musb);
 
-               dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
+               musb_dbg(musb, "CONNECT (%s) devctl %02x",
                                usb_otg_state_string(musb->xceiv->otg->state), devctl);
        }
 
        if (int_usb & MUSB_INTR_DISCONNECT) {
-               dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n",
+               musb_dbg(musb, "DISCONNECT (%s) as %s, devctl %02x",
                                usb_otg_state_string(musb->xceiv->otg->state),
                                MUSB_MODE(musb), devctl);
                handled = IRQ_HANDLED;
@@ -891,7 +904,7 @@ b_host:
                        if (is_host_active(musb))
                                musb_recover_from_babble(musb);
                } else {
-                       dev_dbg(musb->controller, "BUS RESET as %s\n",
+                       musb_dbg(musb, "BUS RESET as %s",
                                usb_otg_state_string(musb->xceiv->otg->state));
                        switch (musb->xceiv->otg->state) {
                        case OTG_STATE_A_SUSPEND:
@@ -899,7 +912,7 @@ b_host:
                                /* FALLTHROUGH */
                        case OTG_STATE_A_WAIT_BCON:     /* OPT TD.4.7-900ms */
                                /* never use invalid T(a_wait_bcon) */
-                               dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n",
+                               musb_dbg(musb, "HNP: in %s, %d msec timeout",
                                        usb_otg_state_string(musb->xceiv->otg->state),
                                        TA_WAIT_BCON(musb));
                                mod_timer(&musb->otg_timer, jiffies
@@ -910,7 +923,7 @@ b_host:
                                musb_g_reset(musb);
                                break;
                        case OTG_STATE_B_WAIT_ACON:
-                               dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n",
+                               musb_dbg(musb, "HNP: RESET (%s), to b_peripheral",
                                        usb_otg_state_string(musb->xceiv->otg->state));
                                musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
                                musb_g_reset(musb);
@@ -922,7 +935,7 @@ b_host:
                                musb_g_reset(musb);
                                break;
                        default:
-                               dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n",
+                               musb_dbg(musb, "Unhandled BUS RESET as %s",
                                        usb_otg_state_string(musb->xceiv->otg->state));
                        }
                }
@@ -1030,7 +1043,7 @@ void musb_start(struct musb *musb)
        u8              devctl = musb_readb(regs, MUSB_DEVCTL);
        u8              power;
 
-       dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
+       musb_dbg(musb, "<== devctl %02x", devctl);
 
        musb_enable_interrupts(musb);
        musb_writeb(regs, MUSB_TESTMODE, 0);
@@ -1078,7 +1091,7 @@ void musb_stop(struct musb *musb)
        /* stop IRQs, timers, ... */
        musb_platform_disable(musb);
        musb_generic_disable(musb);
-       dev_dbg(musb->controller, "HDRC disabled\n");
+       musb_dbg(musb, "HDRC disabled");
 
        /* FIXME
         *  - mark host and/or peripheral drivers unusable/inactive
@@ -1391,7 +1404,7 @@ static int ep_config_from_hw(struct musb *musb)
        void __iomem *mbase = musb->mregs;
        int ret = 0;
 
-       dev_dbg(musb->controller, "<== static silicon ep config\n");
+       musb_dbg(musb, "<== static silicon ep config");
 
        /* FIXME pick up ep0 maxpacket size */
 
@@ -1532,8 +1545,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
                hw_ep->tx_reinit = 1;
 
                if (hw_ep->max_packet_sz_tx) {
-                       dev_dbg(musb->controller,
-                               "%s: hw_ep %d%s, %smax %d\n",
+                       musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
                                musb_driver_name, i,
                                hw_ep->is_shared_fifo ? "shared" : "tx",
                                hw_ep->tx_double_buffered
@@ -1541,8 +1553,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
                                hw_ep->max_packet_sz_tx);
                }
                if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
-                       dev_dbg(musb->controller,
-                               "%s: hw_ep %d%s, %smax %d\n",
+                       musb_dbg(musb, "%s: hw_ep %d%s, %smax %d",
                                musb_driver_name, i,
                                "rx",
                                hw_ep->rx_double_buffered
@@ -1550,7 +1561,7 @@ static int musb_core_init(u16 musb_type, struct musb *musb)
                                hw_ep->max_packet_sz_rx);
                }
                if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
-                       dev_dbg(musb->controller, "hw_ep %d not configured\n", i);
+                       musb_dbg(musb, "hw_ep %d not configured", i);
        }
 
        return 0;
@@ -1577,9 +1588,7 @@ irqreturn_t musb_interrupt(struct musb *musb)
 
        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
 
-       dev_dbg(musb->controller, "** IRQ %s usb%04x tx%04x rx%04x\n",
-               is_host_active(musb) ? "host" : "peripheral",
-               musb->int_usb, musb->int_tx, musb->int_rx);
+       trace_musb_isr(musb);
 
        /**
         * According to Mentor Graphics' documentation, flowchart on page 98,
@@ -1976,7 +1985,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
         * Fail when the board needs a feature that's not enabled.
         */
        if (!plat) {
-               dev_dbg(dev, "no platform_data?\n");
+               dev_err(dev, "no platform_data?\n");
                status = -ENODEV;
                goto fail0;
        }
index e499b862a946f16ff8a8c7fc4694c4da93fd4c74..d4d7c56b48c7edf70aba49d6c2b9aa04347acd7c 100644 (file)
@@ -5,7 +5,9 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 
+#include "cppi_dma.h"
 #include "musb_core.h"
+#include "musb_trace.h"
 
 #define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
 
 #define USB_CTRL_AUTOREQ       0xd0
 #define USB_TDOWN              0xd8
 
-struct cppi41_dma_channel {
-       struct dma_channel channel;
-       struct cppi41_dma_controller *controller;
-       struct musb_hw_ep *hw_ep;
-       struct dma_chan *dc;
-       dma_cookie_t cookie;
-       u8 port_num;
-       u8 is_tx;
-       u8 is_allocated;
-       u8 usb_toggle;
-
-       dma_addr_t buf_addr;
-       u32 total_len;
-       u32 prog_len;
-       u32 transferred;
-       u32 packet_sz;
-       struct list_head tx_check;
-       int tx_zlp;
-};
-
 #define MUSB_DMA_NUM_CHANNELS 15
 
 struct cppi41_dma_controller {
@@ -96,8 +78,8 @@ static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
        if (!toggle && toggle == cppi41_channel->usb_toggle) {
                csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
                musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
-               dev_dbg(cppi41_channel->controller->musb->controller,
-                               "Restoring DATA1 toggle.\n");
+               musb_dbg(cppi41_channel->controller->musb,
+                               "Restoring DATA1 toggle.");
        }
 
        cppi41_channel->usb_toggle = toggle;
@@ -145,6 +127,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
                        csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY;
                        musb_writew(epio, MUSB_TXCSR, csr);
                }
+
+               trace_musb_cppi41_done(cppi41_channel);
                musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
        } else {
                /* next iteration, reload */
@@ -173,6 +157,7 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
                dma_desc->callback = cppi41_dma_callback;
                dma_desc->callback_param = &cppi41_channel->channel;
                cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
+               trace_musb_cppi41_cont(cppi41_channel);
                dma_async_issue_pending(dc);
 
                if (!cppi41_channel->is_tx) {
@@ -240,10 +225,7 @@ static void cppi41_dma_callback(void *private_data)
        transferred = cppi41_channel->prog_len - txstate.residue;
        cppi41_channel->transferred += transferred;
 
-       dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
-               hw_ep->epnum, cppi41_channel->transferred,
-               cppi41_channel->total_len);
-
+       trace_musb_cppi41_gb(cppi41_channel);
        update_rx_toggle(cppi41_channel);
 
        if (cppi41_channel->transferred == cppi41_channel->total_len ||
@@ -374,12 +356,6 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
        struct musb *musb = cppi41_channel->controller->musb;
        unsigned use_gen_rndis = 0;
 
-       dev_dbg(musb->controller,
-               "configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
-               cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
-               packet_sz, mode, (unsigned long long) dma_addr,
-               len, cppi41_channel->is_tx);
-
        cppi41_channel->buf_addr = dma_addr;
        cppi41_channel->total_len = len;
        cppi41_channel->transferred = 0;
@@ -431,6 +407,8 @@ static bool cppi41_configure_channel(struct dma_channel *channel,
        cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
        cppi41_channel->channel.rx_packet_done = false;
 
+       trace_musb_cppi41_config(cppi41_channel);
+
        save_rx_toggle(cppi41_channel);
        dma_async_issue_pending(dc);
        return true;
@@ -461,6 +439,7 @@ static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
        cppi41_channel->hw_ep = hw_ep;
        cppi41_channel->is_allocated = 1;
 
+       trace_musb_cppi41_alloc(cppi41_channel);
        return &cppi41_channel->channel;
 }
 
@@ -468,6 +447,7 @@ static void cppi41_dma_channel_release(struct dma_channel *channel)
 {
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
 
+       trace_musb_cppi41_free(cppi41_channel);
        if (cppi41_channel->is_allocated) {
                cppi41_channel->is_allocated = 0;
                channel->status = MUSB_DMA_STATUS_FREE;
@@ -537,8 +517,7 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
        u16 csr;
 
        is_tx = cppi41_channel->is_tx;
-       dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
-                       cppi41_channel->port_num, is_tx);
+       trace_musb_cppi41_abort(cppi41_channel);
 
        if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
                return 0;
index 27ba8f7994620a2c1b13fe11665401319d57c78a..9a78877a8afec87332069af0744c33ddaf6db485 100644 (file)
@@ -42,6 +42,8 @@
 #define INFO(fmt, args...) yprintk(KERN_INFO, fmt, ## args)
 #define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
 
+void musb_dbg(struct musb *musb, const char *fmt, ...);
+
 #ifdef CONFIG_DEBUG_FS
 int musb_init_debugfs(struct musb *musb);
 void musb_exit_debugfs(struct musb *musb);
index eeb7d9ecf7df34ea1aa906330f55ba40952f4be1..2537179636db00bcf8aa2016a8f7489973112766 100644 (file)
 
 static const struct of_device_id musb_dsps_of_match[];
 
-/**
- * avoid using musb_readx()/musb_writex() as glue layer should not be
- * dependent on musb core layer symbols.
- */
-static inline u8 dsps_readb(const void __iomem *addr, unsigned offset)
-{
-       return __raw_readb(addr + offset);
-}
-
-static inline u32 dsps_readl(const void __iomem *addr, unsigned offset)
-{
-       return __raw_readl(addr + offset);
-}
-
-static inline void dsps_writeb(void __iomem *addr, unsigned offset, u8 data)
-{
-       __raw_writeb(data, addr + offset);
-}
-
-static inline void dsps_writel(void __iomem *addr, unsigned offset, u32 data)
-{
-       __raw_writel(data, addr + offset);
-}
-
 /**
  * DSPS musb wrapper register offset.
  * FIXME: This should be expanded to have all the wrapper registers from TI DSPS
@@ -223,8 +199,8 @@ static void dsps_musb_enable(struct musb *musb)
               ((musb->epmask & wrp->rxep_mask) << wrp->rxep_shift);
        coremask = (wrp->usb_bitmap & ~MUSB_INTR_SOF);
 
-       dsps_writel(reg_base, wrp->epintr_set, epmask);
-       dsps_writel(reg_base, wrp->coreintr_set, coremask);
+       musb_writel(reg_base, wrp->epintr_set, epmask);
+       musb_writel(reg_base, wrp->coreintr_set, coremask);
        /* start polling for ID change in dual-role idle mode */
        if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
                        musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
@@ -244,10 +220,10 @@ static void dsps_musb_disable(struct musb *musb)
        const struct dsps_musb_wrapper *wrp = glue->wrp;
        void __iomem *reg_base = musb->ctrl_base;
 
-       dsps_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
-       dsps_writel(reg_base, wrp->epintr_clear,
+       musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
+       musb_writel(reg_base, wrp->epintr_clear,
                         wrp->txep_bitmap | wrp->rxep_bitmap);
-       dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
+       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 }
 
 static void otg_timer(unsigned long _musb)
@@ -265,14 +241,14 @@ static void otg_timer(unsigned long _musb)
         * We poll because DSPS IP's won't expose several OTG-critical
         * status change events (from the transceiver) otherwise.
         */
-       devctl = dsps_readb(mregs, MUSB_DEVCTL);
+       devctl = musb_readb(mregs, MUSB_DEVCTL);
        dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
                                usb_otg_state_string(musb->xceiv->otg->state));
 
        spin_lock_irqsave(&musb->lock, flags);
        switch (musb->xceiv->otg->state) {
        case OTG_STATE_A_WAIT_BCON:
-               dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
+               musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
                skip_session = 1;
                /* fall */
 
@@ -286,13 +262,13 @@ static void otg_timer(unsigned long _musb)
                        MUSB_HST_MODE(musb);
                }
                if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
-                       dsps_writeb(mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
+                       musb_writeb(mregs, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
                mod_timer(&glue->timer, jiffies +
                                msecs_to_jiffies(wrp->poll_timeout));
                break;
        case OTG_STATE_A_WAIT_VFALL:
                musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
-               dsps_writel(musb->ctrl_base, wrp->coreintr_set,
+               musb_writel(musb->ctrl_base, wrp->coreintr_set,
                            MUSB_INTR_VBUSERROR << wrp->usb_shift);
                break;
        default:
@@ -315,29 +291,29 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
        spin_lock_irqsave(&musb->lock, flags);
 
        /* Get endpoint interrupts */
-       epintr = dsps_readl(reg_base, wrp->epintr_status);
+       epintr = musb_readl(reg_base, wrp->epintr_status);
        musb->int_rx = (epintr & wrp->rxep_bitmap) >> wrp->rxep_shift;
        musb->int_tx = (epintr & wrp->txep_bitmap) >> wrp->txep_shift;
 
        if (epintr)
-               dsps_writel(reg_base, wrp->epintr_status, epintr);
+               musb_writel(reg_base, wrp->epintr_status, epintr);
 
        /* Get usb core interrupts */
-       usbintr = dsps_readl(reg_base, wrp->coreintr_status);
+       usbintr = musb_readl(reg_base, wrp->coreintr_status);
        if (!usbintr && !epintr)
                goto out;
 
        musb->int_usb = (usbintr & wrp->usb_bitmap) >> wrp->usb_shift;
        if (usbintr)
-               dsps_writel(reg_base, wrp->coreintr_status, usbintr);
+               musb_writel(reg_base, wrp->coreintr_status, usbintr);
 
        dev_dbg(musb->controller, "usbintr (%x) epintr(%x)\n",
                        usbintr, epintr);
 
        if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) {
-               int drvvbus = dsps_readl(reg_base, wrp->status);
+               int drvvbus = musb_readl(reg_base, wrp->status);
                void __iomem *mregs = musb->mregs;
-               u8 devctl = dsps_readb(mregs, MUSB_DEVCTL);
+               u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
                int err;
 
                err = musb->int_usb & MUSB_INTR_VBUSERROR;
@@ -442,7 +418,7 @@ static int dsps_musb_init(struct musb *musb)
        musb->phy = devm_phy_get(dev->parent, "usb2-phy");
 
        /* Returns zero if e.g. not clocked */
-       rev = dsps_readl(reg_base, wrp->revision);
+       rev = musb_readl(reg_base, wrp->revision);
        if (!rev)
                return -ENODEV;
 
@@ -463,14 +439,14 @@ static int dsps_musb_init(struct musb *musb)
        setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
 
        /* Reset the musb */
-       dsps_writel(reg_base, wrp->control, (1 << wrp->reset));
+       musb_writel(reg_base, wrp->control, (1 << wrp->reset));
 
        musb->isr = dsps_interrupt;
 
        /* reset the otgdisable bit, needed for host mode to work */
-       val = dsps_readl(reg_base, wrp->phy_utmi);
+       val = musb_readl(reg_base, wrp->phy_utmi);
        val &= ~(1 << wrp->otg_disable);
-       dsps_writel(musb->ctrl_base, wrp->phy_utmi, val);
+       musb_writel(musb->ctrl_base, wrp->phy_utmi, val);
 
        /*
         *  Check whether the dsps version has babble control enabled.
@@ -478,11 +454,11 @@ static int dsps_musb_init(struct musb *musb)
         * If MUSB_BABBLE_CTL returns 0x4 then we have the babble control
         * logic enabled.
         */
-       val = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+       val = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
        if (val & MUSB_BABBLE_RCV_DISABLE) {
                glue->sw_babble_enabled = true;
                val |= MUSB_BABBLE_SW_SESSION_CTRL;
-               dsps_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
+               musb_writeb(musb->mregs, MUSB_BABBLE_CTL, val);
        }
 
        return dsps_musb_dbg_init(musb, glue);
@@ -510,7 +486,7 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
        void __iomem *ctrl_base = musb->ctrl_base;
        u32 reg;
 
-       reg = dsps_readl(ctrl_base, wrp->mode);
+       reg = musb_readl(ctrl_base, wrp->mode);
 
        switch (mode) {
        case MUSB_HOST:
@@ -523,8 +499,8 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
                 */
                reg |= (1 << wrp->iddig_mux);
 
-               dsps_writel(ctrl_base, wrp->mode, reg);
-               dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
+               musb_writel(ctrl_base, wrp->mode, reg);
+               musb_writel(ctrl_base, wrp->phy_utmi, 0x02);
                break;
        case MUSB_PERIPHERAL:
                reg |= (1 << wrp->iddig);
@@ -536,10 +512,10 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
                 */
                reg |= (1 << wrp->iddig_mux);
 
-               dsps_writel(ctrl_base, wrp->mode, reg);
+               musb_writel(ctrl_base, wrp->mode, reg);
                break;
        case MUSB_OTG:
-               dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
+               musb_writel(ctrl_base, wrp->phy_utmi, 0x02);
                break;
        default:
                dev_err(glue->dev, "unsupported mode %d\n", mode);
@@ -554,7 +530,7 @@ static bool dsps_sw_babble_control(struct musb *musb)
        u8 babble_ctl;
        bool session_restart =  false;
 
-       babble_ctl = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+       babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
        dev_dbg(musb->controller, "babble: MUSB_BABBLE_CTL value %x\n",
                babble_ctl);
        /*
@@ -571,14 +547,14 @@ static bool dsps_sw_babble_control(struct musb *musb)
                 * babble is due to noise, then set transmit idle (d7 bit)
                 * to resume normal operation
                 */
-               babble_ctl = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+               babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
                babble_ctl |= MUSB_BABBLE_FORCE_TXIDLE;
-               dsps_writeb(musb->mregs, MUSB_BABBLE_CTL, babble_ctl);
+               musb_writeb(musb->mregs, MUSB_BABBLE_CTL, babble_ctl);
 
                /* wait till line monitor flag cleared */
                dev_dbg(musb->controller, "Set TXIDLE, wait J to clear\n");
                do {
-                       babble_ctl = dsps_readb(musb->mregs, MUSB_BABBLE_CTL);
+                       babble_ctl = musb_readb(musb->mregs, MUSB_BABBLE_CTL);
                        udelay(1);
                } while ((babble_ctl & MUSB_BABBLE_STUCK_J) && timeout--);
 
@@ -896,13 +872,13 @@ static int dsps_suspend(struct device *dev)
                return 0;
 
        mbase = musb->ctrl_base;
-       glue->context.control = dsps_readl(mbase, wrp->control);
-       glue->context.epintr = dsps_readl(mbase, wrp->epintr_set);
-       glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set);
-       glue->context.phy_utmi = dsps_readl(mbase, wrp->phy_utmi);
-       glue->context.mode = dsps_readl(mbase, wrp->mode);
-       glue->context.tx_mode = dsps_readl(mbase, wrp->tx_mode);
-       glue->context.rx_mode = dsps_readl(mbase, wrp->rx_mode);
+       glue->context.control = musb_readl(mbase, wrp->control);
+       glue->context.epintr = musb_readl(mbase, wrp->epintr_set);
+       glue->context.coreintr = musb_readl(mbase, wrp->coreintr_set);
+       glue->context.phy_utmi = musb_readl(mbase, wrp->phy_utmi);
+       glue->context.mode = musb_readl(mbase, wrp->mode);
+       glue->context.tx_mode = musb_readl(mbase, wrp->tx_mode);
+       glue->context.rx_mode = musb_readl(mbase, wrp->rx_mode);
 
        return 0;
 }
@@ -918,13 +894,13 @@ static int dsps_resume(struct device *dev)
                return 0;
 
        mbase = musb->ctrl_base;
-       dsps_writel(mbase, wrp->control, glue->context.control);
-       dsps_writel(mbase, wrp->epintr_set, glue->context.epintr);
-       dsps_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
-       dsps_writel(mbase, wrp->phy_utmi, glue->context.phy_utmi);
-       dsps_writel(mbase, wrp->mode, glue->context.mode);
-       dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
-       dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
+       musb_writel(mbase, wrp->control, glue->context.control);
+       musb_writel(mbase, wrp->epintr_set, glue->context.epintr);
+       musb_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
+       musb_writel(mbase, wrp->phy_utmi, glue->context.phy_utmi);
+       musb_writel(mbase, wrp->mode, glue->context.mode);
+       musb_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
+       musb_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
        if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
            musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
                mod_timer(&glue->timer, jiffies +
index af2a3a7addf904a736e13d81d95f5caac5630f50..6d1e975e9605cf845a8a656747beb0269b2a53c4 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/slab.h>
 
 #include "musb_core.h"
+#include "musb_trace.h"
 
 
 /* ----------------------------------------------------------------------- */
@@ -167,15 +168,7 @@ __acquires(ep->musb->lock)
        if (!dma_mapping_error(&musb->g.dev, request->dma))
                unmap_dma_buffer(req, musb);
 
-       if (request->status == 0)
-               dev_dbg(musb->controller, "%s done request %p,  %d/%d\n",
-                               ep->end_point.name, request,
-                               req->request.actual, req->request.length);
-       else
-               dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
-                               ep->end_point.name, request,
-                               req->request.actual, req->request.length,
-                               request->status);
+       trace_musb_req_gb(req);
        usb_gadget_giveback_request(&req->ep->end_point, &req->request);
        spin_lock(&musb->lock);
        ep->busy = busy;
@@ -217,8 +210,7 @@ static void nuke(struct musb_ep *ep, const int status)
                }
 
                value = c->channel_abort(ep->dma);
-               dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
-                               ep->name, value);
+               musb_dbg(musb, "%s: abort DMA --> %d", ep->name, value);
                c->channel_release(ep->dma);
                ep->dma = NULL;
        }
@@ -266,14 +258,14 @@ static void txstate(struct musb *musb, struct musb_request *req)
 
        /* Check if EP is disabled */
        if (!musb_ep->desc) {
-               dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
+               musb_dbg(musb, "ep:%s disabled - ignore request",
                                                musb_ep->end_point.name);
                return;
        }
 
        /* we shouldn't get here while DMA is active ... but we do ... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
-               dev_dbg(musb->controller, "dma pending...\n");
+               musb_dbg(musb, "dma pending...");
                return;
        }
 
@@ -285,18 +277,18 @@ static void txstate(struct musb *musb, struct musb_request *req)
                        (int)(request->length - request->actual));
 
        if (csr & MUSB_TXCSR_TXPKTRDY) {
-               dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
+               musb_dbg(musb, "%s old packet still ready , txcsr %03x",
                                musb_ep->end_point.name, csr);
                return;
        }
 
        if (csr & MUSB_TXCSR_P_SENDSTALL) {
-               dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
+               musb_dbg(musb, "%s stalling, txcsr %03x",
                                musb_ep->end_point.name, csr);
                return;
        }
 
-       dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
+       musb_dbg(musb, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x",
                        epnum, musb_ep->packet_sz, fifo_count,
                        csr);
 
@@ -424,7 +416,7 @@ static void txstate(struct musb *musb, struct musb_request *req)
        }
 
        /* host may already have the data when this message shows... */
-       dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
+       musb_dbg(musb, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d",
                        musb_ep->end_point.name, use_dma ? "dma" : "pio",
                        request->actual, request->length,
                        musb_readw(epio, MUSB_TXCSR),
@@ -450,8 +442,9 @@ void musb_g_tx(struct musb *musb, u8 epnum)
        req = next_request(musb_ep);
        request = &req->request;
 
+       trace_musb_req_tx(req);
        csr = musb_readw(epio, MUSB_TXCSR);
-       dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
+       musb_dbg(musb, "<== %s, txcsr %04x", musb_ep->end_point.name, csr);
 
        dma = is_dma_capable() ? musb_ep->dma : NULL;
 
@@ -480,7 +473,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
                 * SHOULD NOT HAPPEN... has with CPPI though, after
                 * changing SENDSTALL (and other cases); harmless?
                 */
-               dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
+               musb_dbg(musb, "%s dma still busy?", musb_ep->end_point.name);
                return;
        }
 
@@ -497,7 +490,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
                        /* Ensure writebuffer is empty. */
                        csr = musb_readw(epio, MUSB_TXCSR);
                        request->actual += musb_ep->dma->actual_len;
-                       dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
+                       musb_dbg(musb, "TXCSR%d %04x, DMA off, len %zu, req %p",
                                epnum, csr, musb_ep->dma->actual_len, request);
                }
 
@@ -524,7 +517,6 @@ void musb_g_tx(struct musb *musb, u8 epnum)
                        if (csr & MUSB_TXCSR_TXPKTRDY)
                                return;
 
-                       dev_dbg(musb->controller, "sending zero pkt\n");
                        musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
                                        | MUSB_TXCSR_TXPKTRDY);
                        request->zero = 0;
@@ -543,7 +535,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
                        musb_ep_select(mbase, epnum);
                        req = musb_ep->desc ? next_request(musb_ep) : NULL;
                        if (!req) {
-                               dev_dbg(musb->controller, "%s idle now\n",
+                               musb_dbg(musb, "%s idle now",
                                        musb_ep->end_point.name);
                                return;
                        }
@@ -579,19 +571,19 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 
        /* Check if EP is disabled */
        if (!musb_ep->desc) {
-               dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
+               musb_dbg(musb, "ep:%s disabled - ignore request",
                                                musb_ep->end_point.name);
                return;
        }
 
        /* We shouldn't get here while DMA is active, but we do... */
        if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
-               dev_dbg(musb->controller, "DMA pending...\n");
+               musb_dbg(musb, "DMA pending...");
                return;
        }
 
        if (csr & MUSB_RXCSR_P_SENDSTALL) {
-               dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
+               musb_dbg(musb, "%s stalling, RXCSR %04x",
                    musb_ep->end_point.name, csr);
                return;
        }
@@ -766,7 +758,7 @@ static void rxstate(struct musb *musb, struct musb_request *req)
                        }
 
                        len = request->length - request->actual;
-                       dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
+                       musb_dbg(musb, "%s OUT/RX pio fifo %d/%d, maxpacket %d",
                                        musb_ep->end_point.name,
                                        fifo_count, len,
                                        musb_ep->packet_sz);
@@ -849,12 +841,13 @@ void musb_g_rx(struct musb *musb, u8 epnum)
        if (!req)
                return;
 
+       trace_musb_req_rx(req);
        request = &req->request;
 
        csr = musb_readw(epio, MUSB_RXCSR);
        dma = is_dma_capable() ? musb_ep->dma : NULL;
 
-       dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
+       musb_dbg(musb, "<== %s, rxcsr %04x%s %p", musb_ep->end_point.name,
                        csr, dma ? " (dma)" : "", request);
 
        if (csr & MUSB_RXCSR_P_SENTSTALL) {
@@ -869,18 +862,18 @@ void musb_g_rx(struct musb *musb, u8 epnum)
                csr &= ~MUSB_RXCSR_P_OVERRUN;
                musb_writew(epio, MUSB_RXCSR, csr);
 
-               dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
+               musb_dbg(musb, "%s iso overrun on %p", musb_ep->name, request);
                if (request->status == -EINPROGRESS)
                        request->status = -EOVERFLOW;
        }
        if (csr & MUSB_RXCSR_INCOMPRX) {
                /* REVISIT not necessarily an error */
-               dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
+               musb_dbg(musb, "%s, incomprx", musb_ep->end_point.name);
        }
 
        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
                /* "should not happen"; likely RXPKTRDY pending for DMA */
-               dev_dbg(musb->controller, "%s busy, csr %04x\n",
+               musb_dbg(musb, "%s busy, csr %04x",
                        musb_ep->end_point.name, csr);
                return;
        }
@@ -894,11 +887,6 @@ void musb_g_rx(struct musb *musb, u8 epnum)
 
                request->actual += musb_ep->dma->actual_len;
 
-               dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
-                       epnum, csr,
-                       musb_readw(epio, MUSB_RXCSR),
-                       musb_ep->dma->actual_len, request);
-
 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
        defined(CONFIG_USB_UX500_DMA)
                /* Autoclear doesn't clear RxPktRdy for short packets */
@@ -996,7 +984,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
                        ok = musb->hb_iso_rx;
 
                if (!ok) {
-                       dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
+                       musb_dbg(musb, "no support for high bandwidth ISO");
                        goto fail;
                }
                musb_ep->hb_mult = (tmp >> 11) & 3;
@@ -1019,7 +1007,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
                        goto fail;
 
                if (tmp > hw_ep->max_packet_sz_tx) {
-                       dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
+                       musb_dbg(musb, "packet size beyond hardware FIFO size");
                        goto fail;
                }
 
@@ -1062,7 +1050,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
                        goto fail;
 
                if (tmp > hw_ep->max_packet_sz_rx) {
-                       dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
+                       musb_dbg(musb, "packet size beyond hardware FIFO size");
                        goto fail;
                }
 
@@ -1174,7 +1162,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
 
        spin_unlock_irqrestore(&(musb->lock), flags);
 
-       dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
+       musb_dbg(musb, "%s", musb_ep->end_point.name);
 
        return status;
 }
@@ -1186,19 +1174,17 @@ static int musb_gadget_disable(struct usb_ep *ep)
 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
 {
        struct musb_ep          *musb_ep = to_musb_ep(ep);
-       struct musb             *musb = musb_ep->musb;
        struct musb_request     *request = NULL;
 
        request = kzalloc(sizeof *request, gfp_flags);
-       if (!request) {
-               dev_dbg(musb->controller, "not enough memory\n");
+       if (!request)
                return NULL;
-       }
 
        request->request.dma = DMA_ADDR_INVALID;
        request->epnum = musb_ep->current_epnum;
        request->ep = musb_ep;
 
+       trace_musb_req_alloc(request);
        return &request->request;
 }
 
@@ -1208,7 +1194,10 @@ struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
  */
 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
 {
-       kfree(to_musb_request(req));
+       struct musb_request *request = to_musb_request(req);
+
+       trace_musb_req_free(request);
+       kfree(request);
 }
 
 static LIST_HEAD(buffers);
@@ -1225,10 +1214,7 @@ struct free_record {
  */
 void musb_ep_restart(struct musb *musb, struct musb_request *req)
 {
-       dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
-               req->tx ? "TX/IN" : "RX/OUT",
-               &req->request, req->request.length, req->epnum);
-
+       trace_musb_req_start(req);
        musb_ep_select(musb->mregs, req->epnum);
        if (req->tx)
                txstate(musb, req);
@@ -1259,7 +1245,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
        if (request->ep != musb_ep)
                return -EINVAL;
 
-       dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
+       trace_musb_req_enq(request);
 
        /* request is mine now... */
        request->request.actual = 0;
@@ -1273,7 +1259,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 
        /* don't queue if the ep is down */
        if (!musb_ep->desc) {
-               dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
+               musb_dbg(musb, "req %p queued to %s while ep %s",
                                req, ep->name, "disabled");
                status = -ESHUTDOWN;
                unmap_dma_buffer(request, musb);
@@ -1301,9 +1287,11 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
        int                     status = 0;
        struct musb             *musb = musb_ep->musb;
 
-       if (!ep || !request || to_musb_request(request)->ep != musb_ep)
+       if (!ep || !request || req->ep != musb_ep)
                return -EINVAL;
 
+       trace_musb_req_deq(req);
+
        spin_lock_irqsave(&musb->lock, flags);
 
        list_for_each_entry(r, &musb_ep->req_list, list) {
@@ -1311,7 +1299,8 @@ static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
                        break;
        }
        if (r != req) {
-               dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
+               dev_err(musb->controller, "request %p not queued to %s\n",
+                               request, ep->name);
                status = -EINVAL;
                goto done;
        }
@@ -1377,7 +1366,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
        request = next_request(musb_ep);
        if (value) {
                if (request) {
-                       dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
+                       musb_dbg(musb, "request in progress, cannot halt %s",
                            ep->name);
                        status = -EAGAIN;
                        goto done;
@@ -1386,7 +1375,8 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
                if (musb_ep->is_in) {
                        csr = musb_readw(epio, MUSB_TXCSR);
                        if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
-                               dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
+                               musb_dbg(musb, "FIFO busy, cannot halt %s",
+                                               ep->name);
                                status = -EAGAIN;
                                goto done;
                        }
@@ -1395,7 +1385,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
                musb_ep->wedged = 0;
 
        /* set/clear the stall and toggle bits */
-       dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
+       musb_dbg(musb, "%s: %s stall", ep->name, value ? "set" : "clear");
        if (musb_ep->is_in) {
                csr = musb_readw(epio, MUSB_TXCSR);
                csr |= MUSB_TXCSR_P_WZC_BITS
@@ -1422,7 +1412,7 @@ static int musb_gadget_set_halt(struct usb_ep *ep, int value)
 
        /* maybe start the first request in the queue */
        if (!musb_ep->busy && !value && request) {
-               dev_dbg(musb->controller, "restarting the request\n");
+               musb_dbg(musb, "restarting the request");
                musb_ep_restart(musb, request);
        }
 
@@ -1558,7 +1548,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
        case OTG_STATE_B_IDLE:
                /* Start SRP ... OTG not required. */
                devctl = musb_readb(mregs, MUSB_DEVCTL);
-               dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
+               musb_dbg(musb, "Sending SRP: devctl: %02x", devctl);
                devctl |= MUSB_DEVCTL_SESSION;
                musb_writeb(mregs, MUSB_DEVCTL, devctl);
                devctl = musb_readb(mregs, MUSB_DEVCTL);
@@ -1586,7 +1576,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
                status = 0;
                goto done;
        default:
-               dev_dbg(musb->controller, "Unhandled wake: %s\n",
+               musb_dbg(musb, "Unhandled wake: %s",
                        usb_otg_state_string(musb->xceiv->otg->state));
                goto done;
        }
@@ -1596,7 +1586,7 @@ static int musb_gadget_wakeup(struct usb_gadget *gadget)
        power = musb_readb(mregs, MUSB_POWER);
        power |= MUSB_POWER_RESUME;
        musb_writeb(mregs, MUSB_POWER, power);
-       dev_dbg(musb->controller, "issue wakeup\n");
+       musb_dbg(musb, "issue wakeup");
 
        /* FIXME do this next chunk in a timer callback, no udelay */
        mdelay(2);
@@ -1628,7 +1618,7 @@ static void musb_pullup(struct musb *musb, int is_on)
 
        /* FIXME if on, HdrcStart; if off, HdrcStop */
 
-       dev_dbg(musb->controller, "gadget D+ pullup %s\n",
+       musb_dbg(musb, "gadget D+ pullup %s",
                is_on ? "on" : "off");
        musb_writeb(musb->mregs, MUSB_POWER, power);
 }
@@ -1636,7 +1626,7 @@ static void musb_pullup(struct musb *musb, int is_on)
 #if 0
 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
 {
-       dev_dbg(musb->controller, "<= %s =>\n", __func__);
+       musb_dbg(musb, "<= %s =>\n", __func__);
 
        /*
         * FIXME iff driver's softconnect flag is set (as it is during probe,
@@ -2011,7 +2001,7 @@ void musb_g_suspend(struct musb *musb)
        u8      devctl;
 
        devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-       dev_dbg(musb->controller, "devctl %02x\n", devctl);
+       musb_dbg(musb, "musb_g_suspend: devctl %02x", devctl);
 
        switch (musb->xceiv->otg->state) {
        case OTG_STATE_B_IDLE:
@@ -2030,7 +2020,7 @@ void musb_g_suspend(struct musb *musb)
                /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
                 * A_PERIPHERAL may need care too
                 */
-               WARNING("unhandled SUSPEND transition (%s)\n",
+               WARNING("unhandled SUSPEND transition (%s)",
                                usb_otg_state_string(musb->xceiv->otg->state));
        }
 }
@@ -2047,7 +2037,7 @@ void musb_g_disconnect(struct musb *musb)
        void __iomem    *mregs = musb->mregs;
        u8      devctl = musb_readb(mregs, MUSB_DEVCTL);
 
-       dev_dbg(musb->controller, "devctl %02x\n", devctl);
+       musb_dbg(musb, "musb_g_disconnect: devctl %02x", devctl);
 
        /* clear HR */
        musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
@@ -2064,7 +2054,7 @@ void musb_g_disconnect(struct musb *musb)
 
        switch (musb->xceiv->otg->state) {
        default:
-               dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
+               musb_dbg(musb, "Unhandled disconnect %s, setting a_idle",
                        usb_otg_state_string(musb->xceiv->otg->state));
                musb->xceiv->otg->state = OTG_STATE_A_IDLE;
                MUSB_HST_MODE(musb);
@@ -2094,7 +2084,7 @@ __acquires(musb->lock)
        u8              devctl = musb_readb(mbase, MUSB_DEVCTL);
        u8              power;
 
-       dev_dbg(musb->controller, "<== %s driver '%s'\n",
+       musb_dbg(musb, "<== %s driver '%s'",
                        (devctl & MUSB_DEVCTL_BDEVICE)
                                ? "B-Device" : "A-Device",
                        musb->gadget_driver
index 10d30afe4a3c41469930dd7ea5ac3d57b790c4d6..844a309fe895f04fc3ffa2c0a4748b738906c9bc 100644 (file)
@@ -206,7 +206,7 @@ static inline void musb_try_b_hnp_enable(struct musb *musb)
        void __iomem    *mbase = musb->mregs;
        u8              devctl;
 
-       dev_dbg(musb->controller, "HNP: Setting HR\n");
+       musb_dbg(musb, "HNP: Setting HR");
        devctl = musb_readb(mbase, MUSB_DEVCTL);
        musb_writeb(mbase, MUSB_DEVCTL, devctl | MUSB_DEVCTL_HR);
 }
@@ -303,7 +303,7 @@ __acquires(musb->lock)
                                /* Maybe start the first request in the queue */
                                request = next_request(musb_ep);
                                if (!musb_ep->busy && request) {
-                                       dev_dbg(musb->controller, "restarting the request\n");
+                                       musb_dbg(musb, "restarting the request");
                                        musb_ep_restart(musb, request);
                                }
 
@@ -550,7 +550,7 @@ static void ep0_txstate(struct musb *musb)
 
        if (!req) {
                /* WARN_ON(1); */
-               dev_dbg(musb->controller, "odd; csr0 %04x\n", musb_readw(regs, MUSB_CSR0));
+               musb_dbg(musb, "odd; csr0 %04x", musb_readw(regs, MUSB_CSR0));
                return;
        }
 
@@ -607,7 +607,7 @@ musb_read_setup(struct musb *musb, struct usb_ctrlrequest *req)
        /* NOTE:  earlier 2.6 versions changed setup packets to host
         * order, but now USB packets always stay in USB byte order.
         */
-       dev_dbg(musb->controller, "SETUP req%02x.%02x v%04x i%04x l%d\n",
+       musb_dbg(musb, "SETUP req%02x.%02x v%04x i%04x l%d",
                req->bRequestType,
                req->bRequest,
                le16_to_cpu(req->wValue),
@@ -675,7 +675,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
        csr = musb_readw(regs, MUSB_CSR0);
        len = musb_readb(regs, MUSB_COUNT0);
 
-       dev_dbg(musb->controller, "csr %04x, count %d, ep0stage %s\n",
+       musb_dbg(musb, "csr %04x, count %d, ep0stage %s",
                        csr, len, decode_ep0stage(musb->ep0_state));
 
        if (csr & MUSB_CSR0_P_DATAEND) {
@@ -752,7 +752,7 @@ irqreturn_t musb_g_ep0_irq(struct musb *musb)
 
                /* enter test mode if needed (exit by reset) */
                else if (musb->test_mode) {
-                       dev_dbg(musb->controller, "entering TESTMODE\n");
+                       musb_dbg(musb, "entering TESTMODE");
 
                        if (MUSB_TEST_PACKET == musb->test_mode_nr)
                                musb_load_testpacket(musb);
@@ -864,7 +864,7 @@ setup:
                                break;
                        }
 
-                       dev_dbg(musb->controller, "handled %d, csr %04x, ep0stage %s\n",
+                       musb_dbg(musb, "handled %d, csr %04x, ep0stage %s",
                                handled, csr,
                                decode_ep0stage(musb->ep0_state));
 
@@ -881,7 +881,7 @@ setup:
                        if (handled < 0) {
                                musb_ep_select(mbase, 0);
 stall:
-                               dev_dbg(musb->controller, "stall (%d)\n", handled);
+                               musb_dbg(musb, "stall (%d)", handled);
                                musb->ackpend |= MUSB_CSR0_P_SENDSTALL;
                                musb->ep0_state = MUSB_EP0_STAGE_IDLE;
 finish:
@@ -961,7 +961,7 @@ musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
                status = 0;
                break;
        default:
-               dev_dbg(musb->controller, "ep0 request queued in state %d\n",
+               musb_dbg(musb, "ep0 request queued in state %d",
                                musb->ep0_state);
                status = -EINVAL;
                goto cleanup;
@@ -970,7 +970,7 @@ musb_g_ep0_queue(struct usb_ep *e, struct usb_request *r, gfp_t gfp_flags)
        /* add request to the list */
        list_add_tail(&req->list, &ep->req_list);
 
-       dev_dbg(musb->controller, "queue to %s (%s), length=%d\n",
+       musb_dbg(musb, "queue to %s (%s), length=%d",
                        ep->name, ep->is_in ? "IN/TX" : "OUT/RX",
                        req->request.length);
 
@@ -1063,7 +1063,7 @@ static int musb_g_ep0_halt(struct usb_ep *e, int value)
                musb->ackpend = 0;
                break;
        default:
-               dev_dbg(musb->controller, "ep0 can't halt in state %d\n", musb->ep0_state);
+               musb_dbg(musb, "ep0 can't halt in state %d", musb->ep0_state);
                status = -EINVAL;
        }
 
index d227a71d85e19cffef3a11662bb9a23affa18fb3..53bc4ceefe89ad16dcb576fb2fd18c37ad404e23 100644 (file)
@@ -44,6 +44,7 @@
 
 #include "musb_core.h"
 #include "musb_host.h"
+#include "musb_trace.h"
 
 /* MUSB HOST status 22-mar-2006
  *
@@ -131,7 +132,7 @@ static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
                 * I found using a usb-ethernet device and running iperf
                 * (client on AM335x) has very high chance to trigger it.
                 *
-                * Better to turn on dev_dbg() in musb_cleanup_urb() with
+                * Better to turn on musb_dbg() in musb_cleanup_urb() with
                 * CPPI enabled to see the issue when aborting the tx channel.
                 */
                if (dev_WARN_ONCE(musb->controller, retries-- < 1,
@@ -225,8 +226,6 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
        void                    *buf = urb->transfer_buffer;
        u32                     offset = 0;
        struct musb_hw_ep       *hw_ep = qh->hw_ep;
-       unsigned                pipe = urb->pipe;
-       u8                      address = usb_pipedevice(pipe);
        int                     epnum = hw_ep->epnum;
 
        /* initialize software qh state */
@@ -254,16 +253,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
                len = urb->transfer_buffer_length - urb->actual_length;
        }
 
-       dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
-                       qh, urb, address, qh->epnum,
-                       is_in ? "in" : "out",
-                       ({char *s; switch (qh->type) {
-                       case USB_ENDPOINT_XFER_CONTROL: s = ""; break;
-                       case USB_ENDPOINT_XFER_BULK:    s = "-bulk"; break;
-                       case USB_ENDPOINT_XFER_ISOC:    s = "-iso"; break;
-                       default:                        s = "-intr"; break;
-                       } s; }),
-                       epnum, buf + offset, len);
+       trace_musb_urb_start(musb, urb);
 
        /* Configure endpoint */
        musb_ep_set_qh(hw_ep, is_in, qh);
@@ -277,7 +267,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
        switch (qh->type) {
        case USB_ENDPOINT_XFER_ISOC:
        case USB_ENDPOINT_XFER_INT:
-               dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n");
+               musb_dbg(musb, "check whether there's still time for periodic Tx");
                frame = musb_readw(mbase, MUSB_FRAME);
                /* FIXME this doesn't implement that scheduling policy ...
                 * or handle framecounter wrapping
@@ -291,7 +281,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
                } else {
                        qh->frame = urb->start_frame;
                        /* enable SOF interrupt so we can count down */
-                       dev_dbg(musb->controller, "SOF for %d\n", epnum);
+                       musb_dbg(musb, "SOF for %d", epnum);
 #if 1 /* ifndef        CONFIG_ARCH_DAVINCI */
                        musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
 #endif
@@ -299,7 +289,7 @@ musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
                break;
        default:
 start:
-               dev_dbg(musb->controller, "Start TX%d %s\n", epnum,
+               musb_dbg(musb, "Start TX%d %s", epnum,
                        hw_ep->tx_channel ? "dma" : "pio");
 
                if (!hw_ep->tx_channel)
@@ -314,14 +304,7 @@ static void musb_giveback(struct musb *musb, struct urb *urb, int status)
 __releases(musb->lock)
 __acquires(musb->lock)
 {
-       dev_dbg(musb->controller,
-                       "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
-                       urb, urb->complete, status,
-                       usb_pipedevice(urb->pipe),
-                       usb_pipeendpoint(urb->pipe),
-                       usb_pipein(urb->pipe) ? "in" : "out",
-                       urb->actual_length, urb->transfer_buffer_length
-                       );
+       trace_musb_urb_gb(musb, urb);
 
        usb_hcd_unlink_urb_from_ep(musb->hcd, urb);
        spin_unlock(&musb->lock);
@@ -441,7 +424,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
         * for RX, until we have a test case to understand the behavior of TX.
         */
        if ((!status || !is_in) && qh && qh->is_ready) {
-               dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
+               musb_dbg(musb, "... next ep%d %cX urb %p",
                    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
                musb_start_urb(musb, is_in, qh);
        }
@@ -486,7 +469,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
 
        /* musb_ep_select(mbase, epnum); */
        rx_count = musb_readw(epio, MUSB_RXCOUNT);
-       dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
+       musb_dbg(musb, "RX%d count %d, buffer %p len %d/%d", epnum, rx_count,
                        urb->transfer_buffer, qh->offset,
                        urb->transfer_buffer_length);
 
@@ -508,7 +491,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
                                status = -EOVERFLOW;
                                urb->error_count++;
                        }
-                       dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
+                       musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
                        do_flush = 1;
                } else
                        length = rx_count;
@@ -526,7 +509,7 @@ musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
                if (rx_count > length) {
                        if (urb->status == -EINPROGRESS)
                                urb->status = -EOVERFLOW;
-                       dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length);
+                       musb_dbg(musb, "OVERFLOW %d into %d", rx_count, length);
                        do_flush = 1;
                } else
                        length = rx_count;
@@ -750,8 +733,8 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
        u8                      use_dma = 1;
        u16                     csr;
 
-       dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s "
-                               "h_addr%02x h_port%02x bytes %d\n",
+       musb_dbg(musb, "%s hw%d urb %p spd%d dev%d ep%d%s "
+                               "h_addr%02x h_port%02x bytes %d",
                        is_out ? "-->" : "<--",
                        epnum, urb, urb->dev->speed,
                        qh->addr_reg, qh->epnum, is_out ? "out" : "in",
@@ -969,7 +952,7 @@ finish:
                }
 
                csr |= MUSB_RXCSR_H_REQPKT;
-               dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr);
+               musb_dbg(musb, "RXCSR%d := %04x", epnum, csr);
                musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
                csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
        }
@@ -1085,15 +1068,15 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
                request = (struct usb_ctrlrequest *) urb->setup_packet;
 
                if (!request->wLength) {
-                       dev_dbg(musb->controller, "start no-DATA\n");
+                       musb_dbg(musb, "start no-DATA");
                        break;
                } else if (request->bRequestType & USB_DIR_IN) {
-                       dev_dbg(musb->controller, "start IN-DATA\n");
+                       musb_dbg(musb, "start IN-DATA");
                        musb->ep0_stage = MUSB_EP0_IN;
                        more = true;
                        break;
                } else {
-                       dev_dbg(musb->controller, "start OUT-DATA\n");
+                       musb_dbg(musb, "start OUT-DATA");
                        musb->ep0_stage = MUSB_EP0_OUT;
                        more = true;
                }
@@ -1105,7 +1088,7 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
                if (fifo_count) {
                        fifo_dest = (u8 *) (urb->transfer_buffer
                                        + urb->actual_length);
-                       dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n",
+                       musb_dbg(musb, "Sending %d byte%s to ep0 fifo %p",
                                        fifo_count,
                                        (fifo_count == 1) ? "" : "s",
                                        fifo_dest);
@@ -1150,7 +1133,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
                        ? musb_readb(epio, MUSB_COUNT0)
                        : 0;
 
-       dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
+       musb_dbg(musb, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d",
                csr, qh, len, urb, musb->ep0_stage);
 
        /* if we just did status stage, we are done */
@@ -1161,15 +1144,15 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
 
        /* prepare status */
        if (csr & MUSB_CSR0_H_RXSTALL) {
-               dev_dbg(musb->controller, "STALLING ENDPOINT\n");
+               musb_dbg(musb, "STALLING ENDPOINT");
                status = -EPIPE;
 
        } else if (csr & MUSB_CSR0_H_ERROR) {
-               dev_dbg(musb->controller, "no response, csr0 %04x\n", csr);
+               musb_dbg(musb, "no response, csr0 %04x", csr);
                status = -EPROTO;
 
        } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
-               dev_dbg(musb->controller, "control NAK timeout\n");
+               musb_dbg(musb, "control NAK timeout");
 
                /* NOTE:  this code path would be a good place to PAUSE a
                 * control transfer, if another one is queued, so that
@@ -1184,7 +1167,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
        }
 
        if (status) {
-               dev_dbg(musb->controller, "aborting\n");
+               musb_dbg(musb, "aborting");
                retval = IRQ_HANDLED;
                if (urb)
                        urb->status = status;
@@ -1237,7 +1220,7 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
                        /* flag status stage */
                        musb->ep0_stage = MUSB_EP0_STATUS;
 
-                       dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr);
+                       musb_dbg(musb, "ep0 STATUS, csr %04x", csr);
 
                }
                musb_writew(epio, MUSB_CSR0, csr);
@@ -1291,38 +1274,37 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 
        /* with CPPI, DMA sometimes triggers "extra" irqs */
        if (!urb) {
-               dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+               musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
                return;
        }
 
        pipe = urb->pipe;
        dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
-       dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
+       trace_musb_urb_tx(musb, urb);
+       musb_dbg(musb, "OUT/TX%d end, csr %04x%s", epnum, tx_csr,
                        dma ? ", dma" : "");
 
        /* check for errors */
        if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
                /* dma was disabled, fifo flushed */
-               dev_dbg(musb->controller, "TX end %d stall\n", epnum);
+               musb_dbg(musb, "TX end %d stall", epnum);
 
                /* stall; record URB status */
                status = -EPIPE;
 
        } else if (tx_csr & MUSB_TXCSR_H_ERROR) {
                /* (NON-ISO) dma was disabled, fifo flushed */
-               dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum);
+               musb_dbg(musb, "TX 3strikes on ep=%d", epnum);
 
                status = -ETIMEDOUT;
 
        } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
                if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1
                                && !list_is_singular(&musb->out_bulk)) {
-                       dev_dbg(musb->controller,
-                               "NAK timeout on TX%d ep\n", epnum);
+                       musb_dbg(musb, "NAK timeout on TX%d ep", epnum);
                        musb_bulk_nak_timeout(musb, hw_ep, 0);
                } else {
-                       dev_dbg(musb->controller,
-                               "TX end=%d device not responding\n", epnum);
+                       musb_dbg(musb, "TX ep%d device not responding", epnum);
                        /* NOTE:  this code path would be a good place to PAUSE a
                         * transfer, if there's some other (nonperiodic) tx urb
                         * that could use this fifo.  (dma complicates it...)
@@ -1368,7 +1350,7 @@ done:
 
        /* second cppi case */
        if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
-               dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
+               musb_dbg(musb, "extra TX%d ready, csr %04x", epnum, tx_csr);
                return;
        }
 
@@ -1427,8 +1409,9 @@ done:
                 * FIFO mode too...
                 */
                if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
-                       dev_dbg(musb->controller, "DMA complete but packet still in FIFO, "
-                           "CSR %04x\n", tx_csr);
+                       musb_dbg(musb,
+                               "DMA complete but FIFO not empty, CSR %04x",
+                               tx_csr);
                        return;
                }
        }
@@ -1494,7 +1477,7 @@ done:
                        return;
                }
        } else  if (tx_csr & MUSB_TXCSR_DMAENAB) {
-               dev_dbg(musb->controller, "not complete, but DMA enabled?\n");
+               musb_dbg(musb, "not complete, but DMA enabled?");
                return;
        }
 
@@ -1723,7 +1706,7 @@ static int musb_rx_dma_in_inventra_cppi41(struct dma_controller *dma,
                                d_status = -EOVERFLOW;
                                urb->error_count++;
                        }
-                       dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",
+                       musb_dbg(musb, "** OVERFLOW %d into %d",
                                rx_count, d->length);
 
                        length = d->length;
@@ -1847,28 +1830,26 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                 * usbtest #11 (unlinks) triggers it regularly, sometimes
                 * with fifo full.  (Only with DMA??)
                 */
-               dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
-                       musb_readw(epio, MUSB_RXCOUNT));
+               musb_dbg(musb, "BOGUS RX%d ready, csr %04x, count %d",
+                       epnum, val, musb_readw(epio, MUSB_RXCOUNT));
                musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
                return;
        }
 
        pipe = urb->pipe;
 
-       dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
-               epnum, rx_csr, urb->actual_length,
-               dma ? dma->actual_len : 0);
+       trace_musb_urb_rx(musb, urb);
 
        /* check for errors, concurrent stall & unlink is not really
         * handled yet! */
        if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
-               dev_dbg(musb->controller, "RX end %d STALL\n", epnum);
+               musb_dbg(musb, "RX end %d STALL", epnum);
 
                /* stall; record URB status */
                status = -EPIPE;
 
        } else if (rx_csr & MUSB_RXCSR_H_ERROR) {
-               dev_dbg(musb->controller, "end %d RX proto error\n", epnum);
+               musb_dbg(musb, "end %d RX proto error", epnum);
 
                status = -EPROTO;
                musb_writeb(epio, MUSB_RXINTERVAL, 0);
@@ -1879,7 +1860,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
        } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
 
                if (USB_ENDPOINT_XFER_ISOC != qh->type) {
-                       dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum);
+                       musb_dbg(musb, "RX end %d NAK timeout", epnum);
 
                        /* NOTE: NAKing is *NOT* an error, so we want to
                         * continue.  Except ... if there's a request for
@@ -1902,12 +1883,12 @@ void musb_host_rx(struct musb *musb, u8 epnum)
 
                        goto finish;
                } else {
-                       dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum);
+                       musb_dbg(musb, "RX end %d ISO data error", epnum);
                        /* packet error reported later */
                        iso_err = true;
                }
        } else if (rx_csr & MUSB_RXCSR_INCOMPRX) {
-               dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n",
+               musb_dbg(musb, "end %d high bandwidth incomplete ISO packet RX",
                                epnum);
                status = -EPROTO;
        }
@@ -1952,7 +1933,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                        done = true;
                }
 
-               dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
+               musb_dbg(musb, "RXCSR%d %04x, reqpkt, len %zu%s", epnum, rx_csr,
                                xfer_len, dma ? ", dma" : "");
                rx_csr &= ~MUSB_RXCSR_H_REQPKT;
 
@@ -1973,8 +1954,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                if (musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
                    musb_dma_cppi41(musb)) {
                            done = musb_rx_dma_inventra_cppi41(c, hw_ep, qh, urb, xfer_len);
-                           dev_dbg(hw_ep->musb->controller,
-                                   "ep %d dma %s, rxcsr %04x, rxcount %d\n",
+                           musb_dbg(hw_ep->musb,
+                                   "ep %d dma %s, rxcsr %04x, rxcount %d",
                                    epnum, done ? "off" : "reset",
                                    musb_readw(epio, MUSB_RXCSR),
                                    musb_readw(epio, MUSB_RXCOUNT));
@@ -2001,8 +1982,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                /* we are expecting IN packets */
                if ((musb_dma_inventra(musb) || musb_dma_ux500(musb) ||
                    musb_dma_cppi41(musb)) && dma) {
-                       dev_dbg(hw_ep->musb->controller,
-                               "RX%d count %d, buffer 0x%llx len %d/%d\n",
+                       musb_dbg(hw_ep->musb,
+                               "RX%d count %d, buffer 0x%llx len %d/%d",
                                epnum, musb_readw(epio, MUSB_RXCOUNT),
                                (unsigned long long) urb->transfer_dma
                                + urb->actual_length,
@@ -2054,7 +2035,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                                done = musb_host_packet_rx(musb, urb,
                                                epnum, iso_err);
                        }
-                       dev_dbg(musb->controller, "read %spacket\n", done ? "last " : "");
+                       musb_dbg(musb, "read %spacket", done ? "last " : "");
                }
        }
 
@@ -2178,7 +2159,7 @@ static int musb_schedule(
        idle = 1;
        qh->mux = 0;
        hw_ep = musb->endpoints + best_end;
-       dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end);
+       musb_dbg(musb, "qh %p periodic slot %d", qh, best_end);
 success:
        if (head) {
                idle = list_empty(head);
@@ -2210,6 +2191,8 @@ static int musb_urb_enqueue(
        if (!is_host_active(musb) || !musb->is_active)
                return -ENODEV;
 
+       trace_musb_urb_enq(musb, urb);
+
        spin_lock_irqsave(&musb->lock, flags);
        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        qh = ret ? NULL : hep->hcpriv;
@@ -2400,8 +2383,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh)
                dma = is_in ? ep->rx_channel : ep->tx_channel;
                if (dma) {
                        status = ep->musb->dma_controller->channel_abort(dma);
-                       dev_dbg(musb->controller,
-                               "abort %cX%d DMA for urb %p --> %d\n",
+                       musb_dbg(musb, "abort %cX%d DMA for urb %p --> %d",
                                is_in ? 'R' : 'T', ep->epnum,
                                urb, status);
                        urb->actual_length += dma->actual_len;
@@ -2447,10 +2429,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
        int                     is_in  = usb_pipein(urb->pipe);
        int                     ret;
 
-       dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb,
-                       usb_pipedevice(urb->pipe),
-                       usb_pipeendpoint(urb->pipe),
-                       is_in ? "in" : "out");
+       trace_musb_urb_deq(musb, urb);
 
        spin_lock_irqsave(&musb->lock, flags);
        ret = usb_hcd_check_unlink_urb(hcd, urb, status);
diff --git a/drivers/usb/musb/musb_trace.c b/drivers/usb/musb/musb_trace.c
new file mode 100644 (file)
index 0000000..70973d9
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * musb_trace.c - MUSB Controller Trace Support
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Bin Liu <b-liu@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2  of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "musb_trace.h"
+
+void musb_dbg(struct musb *musb, const char *fmt, ...)
+{
+       struct va_format vaf;
+       va_list args;
+
+       va_start(args, fmt);
+       vaf.fmt = fmt;
+       vaf.va = &args;
+
+       trace_musb_log(musb, &vaf);
+
+       va_end(args);
+}
diff --git a/drivers/usb/musb/musb_trace.h b/drivers/usb/musb/musb_trace.h
new file mode 100644 (file)
index 0000000..f031c9e
--- /dev/null
@@ -0,0 +1,371 @@
+/*
+ * musb_trace.h - MUSB Controller Trace Support
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Bin Liu <b-liu@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2  of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM musb
+
+#if !defined(__MUSB_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __MUSB_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/usb.h>
+#include "musb_core.h"
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+#include "cppi_dma.h"
+#endif
+
+#define MUSB_MSG_MAX   500
+
+TRACE_EVENT(musb_log,
+       TP_PROTO(struct musb *musb, struct va_format *vaf),
+       TP_ARGS(musb, vaf),
+       TP_STRUCT__entry(
+               __string(name, dev_name(musb->controller))
+               __dynamic_array(char, msg, MUSB_MSG_MAX)
+       ),
+       TP_fast_assign(
+               __assign_str(name, dev_name(musb->controller));
+               vsnprintf(__get_str(msg), MUSB_MSG_MAX, vaf->fmt, *vaf->va);
+       ),
+       TP_printk("%s: %s", __get_str(name), __get_str(msg))
+);
+
+DECLARE_EVENT_CLASS(musb_regb,
+       TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+       TP_ARGS(caller, addr, offset, data),
+       TP_STRUCT__entry(
+               __field(void *, caller)
+               __field(const void *, addr)
+               __field(unsigned int, offset)
+               __field(u8, data)
+       ),
+       TP_fast_assign(
+               __entry->caller = caller;
+               __entry->addr = addr;
+               __entry->offset = offset;
+               __entry->data = data;
+       ),
+       TP_printk("%pS: %p + %04x: %02x",
+               __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regb, musb_readb,
+       TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+       TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regb, musb_writeb,
+       TP_PROTO(void *caller, const void *addr, unsigned int offset, u8 data),
+       TP_ARGS(caller, addr, offset, data)
+);
+
+DECLARE_EVENT_CLASS(musb_regw,
+       TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+       TP_ARGS(caller, addr, offset, data),
+       TP_STRUCT__entry(
+               __field(void *, caller)
+               __field(const void *, addr)
+               __field(unsigned int, offset)
+               __field(u16, data)
+       ),
+       TP_fast_assign(
+               __entry->caller = caller;
+               __entry->addr = addr;
+               __entry->offset = offset;
+               __entry->data = data;
+       ),
+       TP_printk("%pS: %p + %04x: %04x",
+               __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regw, musb_readw,
+       TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+       TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regw, musb_writew,
+       TP_PROTO(void *caller, const void *addr, unsigned int offset, u16 data),
+       TP_ARGS(caller, addr, offset, data)
+);
+
+DECLARE_EVENT_CLASS(musb_regl,
+       TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+       TP_ARGS(caller, addr, offset, data),
+       TP_STRUCT__entry(
+               __field(void *, caller)
+               __field(const void *, addr)
+               __field(unsigned int, offset)
+               __field(u32, data)
+       ),
+       TP_fast_assign(
+               __entry->caller = caller;
+               __entry->addr = addr;
+               __entry->offset = offset;
+               __entry->data = data;
+       ),
+       TP_printk("%pS: %p + %04x: %08x",
+               __entry->caller, __entry->addr, __entry->offset, __entry->data)
+);
+
+DEFINE_EVENT(musb_regl, musb_readl,
+       TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+       TP_ARGS(caller, addr, offset, data)
+);
+
+DEFINE_EVENT(musb_regl, musb_writel,
+       TP_PROTO(void *caller, const void *addr, unsigned int offset, u32 data),
+       TP_ARGS(caller, addr, offset, data)
+);
+
+TRACE_EVENT(musb_isr,
+       TP_PROTO(struct musb *musb),
+       TP_ARGS(musb),
+       TP_STRUCT__entry(
+               __string(name, dev_name(musb->controller))
+               __field(u8, int_usb)
+               __field(u16, int_tx)
+               __field(u16, int_rx)
+       ),
+       TP_fast_assign(
+               __assign_str(name, dev_name(musb->controller));
+               __entry->int_usb = musb->int_usb;
+               __entry->int_tx = musb->int_tx;
+               __entry->int_rx = musb->int_rx;
+       ),
+       TP_printk("%s: usb %02x, tx %04x, rx %04x",
+               __get_str(name), __entry->int_usb,
+               __entry->int_tx, __entry->int_rx
+       )
+);
+
+DECLARE_EVENT_CLASS(musb_urb,
+       TP_PROTO(struct musb *musb, struct urb *urb),
+       TP_ARGS(musb, urb),
+       TP_STRUCT__entry(
+               __string(name, dev_name(musb->controller))
+               __field(struct urb *, urb)
+               __field(unsigned int, pipe)
+               __field(int, status)
+               __field(unsigned int, flag)
+               __field(u32, buf_len)
+               __field(u32, actual_len)
+       ),
+       TP_fast_assign(
+               __assign_str(name, dev_name(musb->controller));
+               __entry->urb = urb;
+               __entry->pipe = urb->pipe;
+               __entry->status = urb->status;
+               __entry->flag = urb->transfer_flags;
+               __entry->buf_len = urb->transfer_buffer_length;
+               __entry->actual_len = urb->actual_length;
+       ),
+       TP_printk("%s: %p, dev%d ep%d%s, flag 0x%x, len %d/%d, status %d",
+                       __get_str(name), __entry->urb,
+                       usb_pipedevice(__entry->pipe),
+                       usb_pipeendpoint(__entry->pipe),
+                       usb_pipein(__entry->pipe) ? "in" : "out",
+                       __entry->flag,
+                       __entry->actual_len, __entry->buf_len,
+                       __entry->status
+       )
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_start,
+       TP_PROTO(struct musb *musb, struct urb *urb),
+       TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_gb,
+       TP_PROTO(struct musb *musb, struct urb *urb),
+       TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_rx,
+       TP_PROTO(struct musb *musb, struct urb *urb),
+       TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_tx,
+       TP_PROTO(struct musb *musb, struct urb *urb),
+       TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_enq,
+       TP_PROTO(struct musb *musb, struct urb *urb),
+       TP_ARGS(musb, urb)
+);
+
+DEFINE_EVENT(musb_urb, musb_urb_deq,
+       TP_PROTO(struct musb *musb, struct urb *urb),
+       TP_ARGS(musb, urb)
+);
+
+DECLARE_EVENT_CLASS(musb_req,
+       TP_PROTO(struct musb_request *req),
+       TP_ARGS(req),
+       TP_STRUCT__entry(
+               __field(struct usb_request *, req)
+               __field(u8, is_tx)
+               __field(u8, epnum)
+               __field(int, status)
+               __field(unsigned int, buf_len)
+               __field(unsigned int, actual_len)
+               __field(unsigned int, zero)
+               __field(unsigned int, short_not_ok)
+               __field(unsigned int, no_interrupt)
+       ),
+       TP_fast_assign(
+               __entry->req = &req->request;
+               __entry->is_tx = req->tx;
+               __entry->epnum = req->epnum;
+               __entry->status = req->request.status;
+               __entry->buf_len = req->request.length;
+               __entry->actual_len = req->request.actual;
+               __entry->zero = req->request.zero;
+               __entry->short_not_ok = req->request.short_not_ok;
+               __entry->no_interrupt = req->request.no_interrupt;
+       ),
+       TP_printk("%p, ep%d %s, %s%s%s, len %d/%d, status %d",
+                       __entry->req, __entry->epnum,
+                       __entry->is_tx ? "tx/IN" : "rx/OUT",
+                       __entry->zero ? "Z" : "z",
+                       __entry->short_not_ok ? "S" : "s",
+                       __entry->no_interrupt ? "I" : "i",
+                       __entry->actual_len, __entry->buf_len,
+                       __entry->status
+       )
+);
+
+DEFINE_EVENT(musb_req, musb_req_gb,
+       TP_PROTO(struct musb_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_tx,
+       TP_PROTO(struct musb_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_rx,
+       TP_PROTO(struct musb_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_alloc,
+       TP_PROTO(struct musb_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_free,
+       TP_PROTO(struct musb_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_start,
+       TP_PROTO(struct musb_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_enq,
+       TP_PROTO(struct musb_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(musb_req, musb_req_deq,
+       TP_PROTO(struct musb_request *req),
+       TP_ARGS(req)
+);
+
+#ifdef CONFIG_USB_TI_CPPI41_DMA
+DECLARE_EVENT_CLASS(musb_cppi41,
+       TP_PROTO(struct cppi41_dma_channel *ch),
+       TP_ARGS(ch),
+       TP_STRUCT__entry(
+               __field(struct cppi41_dma_channel *, ch)
+               __string(name, dev_name(ch->hw_ep->musb->controller))
+               __field(u8, hwep)
+               __field(u8, port)
+               __field(u8, is_tx)
+               __field(u32, len)
+               __field(u32, prog_len)
+               __field(u32, xferred)
+       ),
+       TP_fast_assign(
+               __entry->ch = ch;
+               __assign_str(name, dev_name(ch->hw_ep->musb->controller));
+               __entry->hwep = ch->hw_ep->epnum;
+               __entry->port = ch->port_num;
+               __entry->is_tx = ch->is_tx;
+               __entry->len = ch->total_len;
+               __entry->prog_len = ch->prog_len;
+               __entry->xferred = ch->transferred;
+       ),
+       TP_printk("%s: %p, hwep%d ch%d%s, prog_len %d, len %d/%d",
+                       __get_str(name), __entry->ch, __entry->hwep,
+                       __entry->port, __entry->is_tx ? "tx" : "rx",
+                       __entry->prog_len, __entry->xferred, __entry->len
+       )
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_done,
+       TP_PROTO(struct cppi41_dma_channel *ch),
+       TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_gb,
+       TP_PROTO(struct cppi41_dma_channel *ch),
+       TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_config,
+       TP_PROTO(struct cppi41_dma_channel *ch),
+       TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_cont,
+       TP_PROTO(struct cppi41_dma_channel *ch),
+       TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_alloc,
+       TP_PROTO(struct cppi41_dma_channel *ch),
+       TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_abort,
+       TP_PROTO(struct cppi41_dma_channel *ch),
+       TP_ARGS(ch)
+);
+
+DEFINE_EVENT(musb_cppi41, musb_cppi41_free,
+       TP_PROTO(struct cppi41_dma_channel *ch),
+       TP_ARGS(ch)
+);
+#endif /* CONFIG_USB_TI_CPPI41_DMA */
+
+#endif /* __MUSB_TRACE_H */
+
+/* this part has to be here */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE musb_trace
+
+#include <trace/define_trace.h>
index 92d5f718659b7ebbeba0898428de681af18fdf2a..192248f974ec58c88851441e7569d82a4c867f14 100644 (file)
@@ -55,8 +55,7 @@ void musb_host_finish_resume(struct work_struct *work)
 
        power = musb_readb(musb->mregs, MUSB_POWER);
        power &= ~MUSB_POWER_RESUME;
-       dev_dbg(musb->controller, "root port resume stopped, power %02x\n",
-               power);
+       musb_dbg(musb, "root port resume stopped, power %02x", power);
        musb_writeb(musb->mregs, MUSB_POWER, power);
 
        /*
@@ -104,7 +103,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
                                break;
                }
 
-               dev_dbg(musb->controller, "Root port suspended, power %02x\n", power);
+               musb_dbg(musb, "Root port suspended, power %02x", power);
 
                musb->port1_status |= USB_PORT_STAT_SUSPEND;
                switch (musb->xceiv->otg->state) {
@@ -123,7 +122,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
                        musb_platform_try_idle(musb, 0);
                        break;
                default:
-                       dev_dbg(musb->controller, "bogus rh suspend? %s\n",
+                       musb_dbg(musb, "bogus rh suspend? %s",
                                usb_otg_state_string(musb->xceiv->otg->state));
                }
        } else if (power & MUSB_POWER_SUSPENDM) {
@@ -131,7 +130,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
                power |= MUSB_POWER_RESUME;
                musb_writeb(mbase, MUSB_POWER, power);
 
-               dev_dbg(musb->controller, "Root port resuming, power %02x\n", power);
+               musb_dbg(musb, "Root port resuming, power %02x", power);
 
                /* later, GetPortStatus will stop RESUME signaling */
                musb->port1_status |= MUSB_PORT_STAT_RESUME;
@@ -146,7 +145,7 @@ void musb_port_reset(struct musb *musb, bool do_reset)
        void __iomem    *mbase = musb->mregs;
 
        if (musb->xceiv->otg->state == OTG_STATE_B_IDLE) {
-               dev_dbg(musb->controller, "HNP: Returning from HNP; no hub reset from b_idle\n");
+               musb_dbg(musb, "HNP: Returning from HNP; no hub reset from b_idle");
                musb->port1_status &= ~USB_PORT_STAT_RESET;
                return;
        }
@@ -194,7 +193,7 @@ void musb_port_reset(struct musb *musb, bool do_reset)
                schedule_delayed_work(&musb->deassert_reset_work,
                                      msecs_to_jiffies(50));
        } else {
-               dev_dbg(musb->controller, "root port reset stopped\n");
+               musb_dbg(musb, "root port reset stopped");
                musb_platform_pre_root_reset_end(musb);
                musb_writeb(mbase, MUSB_POWER,
                                power & ~MUSB_POWER_RESET);
@@ -202,7 +201,7 @@ void musb_port_reset(struct musb *musb, bool do_reset)
 
                power = musb_readb(mbase, MUSB_POWER);
                if (power & MUSB_POWER_HSMODE) {
-                       dev_dbg(musb->controller, "high-speed device connected\n");
+                       musb_dbg(musb, "high-speed device connected");
                        musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
                }
 
@@ -242,7 +241,7 @@ void musb_root_disconnect(struct musb *musb)
                musb->xceiv->otg->state = OTG_STATE_B_IDLE;
                break;
        default:
-               dev_dbg(musb->controller, "host disconnect (%s)\n",
+               musb_dbg(musb, "host disconnect (%s)",
                        usb_otg_state_string(musb->xceiv->otg->state));
        }
 }
@@ -337,7 +336,7 @@ int musb_hub_control(
                default:
                        goto error;
                }
-               dev_dbg(musb->controller, "clear feature %d\n", wValue);
+               musb_dbg(musb, "clear feature %d", wValue);
                musb->port1_status &= ~(1 << wValue);
                break;
        case GetHubDescriptor:
@@ -372,8 +371,7 @@ int musb_hub_control(
                                (__le32 *) buf);
 
                /* port change status is more interesting */
-               dev_dbg(musb->controller, "port status %08x\n",
-                               musb->port1_status);
+               musb_dbg(musb, "port status %08x", musb->port1_status);
                break;
        case SetPortFeature:
                if ((wIndex & 0xff) != 1)
@@ -443,7 +441,7 @@ int musb_hub_control(
                default:
                        goto error;
                }
-               dev_dbg(musb->controller, "set feature %d\n", wValue);
+               musb_dbg(musb, "set feature %d", wValue);
                musb->port1_status |= 1 << wValue;
                break;
 
index 8abfe4ec62fb53d639c75e04da3de231ad76642d..3620073da58c9697cd04d864a865d8408e16bbcb 100644 (file)
@@ -117,7 +117,7 @@ static void configure_channel(struct dma_channel *channel,
        u8 bchannel = musb_channel->idx;
        u16 csr = 0;
 
-       dev_dbg(musb->controller, "%p, pkt_sz %d, addr %pad, len %d, mode %d\n",
+       musb_dbg(musb, "%p, pkt_sz %d, addr %pad, len %d, mode %d",
                        channel, packet_sz, &dma_addr, len, mode);
 
        if (mode) {
@@ -152,7 +152,7 @@ static int dma_channel_program(struct dma_channel *channel,
        struct musb_dma_controller *controller = musb_channel->controller;
        struct musb *musb = controller->private_data;
 
-       dev_dbg(musb->controller, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d\n",
+       musb_dbg(musb, "ep%d-%s pkt_sz %d, dma_addr %pad length %d, mode %d",
                musb_channel->epnum,
                musb_channel->transmit ? "Tx" : "Rx",
                packet_sz, &dma_addr, len, mode);
@@ -266,7 +266,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
 #endif
 
        if (!int_hsdma) {
-               dev_dbg(musb->controller, "spurious DMA irq\n");
+               musb_dbg(musb, "spurious DMA irq");
 
                for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
                        musb_channel = (struct musb_dma_channel *)
@@ -280,7 +280,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
                        }
                }
 
-               dev_dbg(musb->controller, "int_hsdma = 0x%x\n", int_hsdma);
+               musb_dbg(musb, "int_hsdma = 0x%x", int_hsdma);
 
                if (!int_hsdma)
                        goto done;
@@ -307,7 +307,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
                                channel->actual_len = addr
                                        - musb_channel->start_addr;
 
-                               dev_dbg(musb->controller, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n",
+                               musb_dbg(musb, "ch %p, 0x%x -> 0x%x (%zu / %d) %s",
                                        channel, musb_channel->start_addr,
                                        addr, channel->actual_len,
                                        musb_channel->len,
index 76500515dd8ba0884eae778253527f52601f2219..c6ee16660572b3b7fc7272096a12ae1705026a8b 100644 (file)
@@ -256,12 +256,10 @@ static int sunxi_musb_init(struct musb *musb)
        writeb(SUNXI_MUSB_VEND0_PIO_MODE, musb->mregs + SUNXI_MUSB_VEND0);
 
        /* Register notifier before calling phy_init() */
-       if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE) {
-               ret = extcon_register_notifier(glue->extcon, EXTCON_USB_HOST,
-                                              &glue->host_nb);
-               if (ret)
-                       goto error_reset_assert;
-       }
+       ret = extcon_register_notifier(glue->extcon, EXTCON_USB_HOST,
+                                      &glue->host_nb);
+       if (ret)
+               goto error_reset_assert;
 
        ret = phy_init(glue->phy);
        if (ret)
@@ -275,9 +273,8 @@ static int sunxi_musb_init(struct musb *musb)
        return 0;
 
 error_unregister_notifier:
-       if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
-               extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
-                                          &glue->host_nb);
+       extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
+                                  &glue->host_nb);
 error_reset_assert:
        if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
                reset_control_assert(glue->rst);
@@ -301,9 +298,8 @@ static int sunxi_musb_exit(struct musb *musb)
 
        phy_exit(glue->phy);
 
-       if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
-               extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
-                                          &glue->host_nb);
+       extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
+                                  &glue->host_nb);
 
        if (test_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags))
                reset_control_assert(glue->rst);
@@ -315,25 +311,6 @@ static int sunxi_musb_exit(struct musb *musb)
        return 0;
 }
 
-static int sunxi_set_mode(struct musb *musb, u8 mode)
-{
-       struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
-       int ret;
-
-       if (mode == MUSB_HOST) {
-               ret = phy_power_on(glue->phy);
-               if (ret)
-                       return ret;
-
-               set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
-               /* Stop musb work from turning vbus off again */
-               set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
-               musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
-       }
-
-       return 0;
-}
-
 static void sunxi_musb_enable(struct musb *musb)
 {
        struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
@@ -354,13 +331,13 @@ static void sunxi_musb_disable(struct musb *musb)
        clear_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags);
 }
 
-struct dma_controller *sunxi_musb_dma_controller_create(struct musb *musb,
-                                                   void __iomem *base)
+static struct dma_controller *
+sunxi_musb_dma_controller_create(struct musb *musb, void __iomem *base)
 {
        return NULL;
 }
 
-void sunxi_musb_dma_controller_destroy(struct dma_controller *c)
+static void sunxi_musb_dma_controller_destroy(struct dma_controller *c)
 {
 }
 
@@ -582,7 +559,6 @@ static const struct musb_platform_ops sunxi_musb_ops = {
        .exit           = sunxi_musb_exit,
        .enable         = sunxi_musb_enable,
        .disable        = sunxi_musb_disable,
-       .set_mode       = sunxi_set_mode,
        .fifo_offset    = sunxi_musb_fifo_offset,
        .ep_offset      = sunxi_musb_ep_offset,
        .busctl_offset  = sunxi_musb_busctl_offset,
@@ -638,10 +614,6 @@ static int sunxi_musb_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
-       if (!glue)
-               return -ENOMEM;
-
        memset(&pdata, 0, sizeof(pdata));
        switch (usb_get_dr_mode(&pdev->dev)) {
 #if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_HOST
@@ -649,15 +621,13 @@ static int sunxi_musb_probe(struct platform_device *pdev)
                pdata.mode = MUSB_PORT_MODE_HOST;
                break;
 #endif
+#if defined CONFIG_USB_MUSB_DUAL_ROLE || defined CONFIG_USB_MUSB_GADGET
+       case USB_DR_MODE_PERIPHERAL:
+               pdata.mode = MUSB_PORT_MODE_GADGET;
+               break;
+#endif
 #ifdef CONFIG_USB_MUSB_DUAL_ROLE
        case USB_DR_MODE_OTG:
-               glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
-               if (IS_ERR(glue->extcon)) {
-                       if (PTR_ERR(glue->extcon) == -EPROBE_DEFER)
-                               return -EPROBE_DEFER;
-                       dev_err(&pdev->dev, "Invalid or missing extcon\n");
-                       return PTR_ERR(glue->extcon);
-               }
                pdata.mode = MUSB_PORT_MODE_DUAL_ROLE;
                break;
 #endif
@@ -668,6 +638,10 @@ static int sunxi_musb_probe(struct platform_device *pdev)
        pdata.platform_ops      = &sunxi_musb_ops;
        pdata.config            = &sunxi_musb_hdrc_config;
 
+       glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
+       if (!glue)
+               return -ENOMEM;
+
        glue->dev = &pdev->dev;
        INIT_WORK(&glue->work, sunxi_musb_work);
        glue->host_nb.notifier_call = sunxi_musb_host_notifier;
@@ -701,6 +675,14 @@ static int sunxi_musb_probe(struct platform_device *pdev)
                }
        }
 
+       glue->extcon = extcon_get_edev_by_phandle(&pdev->dev, 0);
+       if (IS_ERR(glue->extcon)) {
+               if (PTR_ERR(glue->extcon) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+               dev_err(&pdev->dev, "Invalid or missing extcon\n");
+               return PTR_ERR(glue->extcon);
+       }
+
        glue->phy = devm_phy_get(&pdev->dev, "usb");
        if (IS_ERR(glue->phy)) {
                if (PTR_ERR(glue->phy) == -EPROBE_DEFER)
index c6904742e2aa45e25fc8ba10bed824391f92e117..b9c409a18faaea511d3280a01bd519769a3ccbd4 100644 (file)
@@ -21,6 +21,7 @@ config AB8500_USB
 config FSL_USB2_OTG
        bool "Freescale USB OTG Transceiver Driver"
        depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
+       depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
        select USB_PHY
        help
          Enable this to support Freescale USB OTG transceiver.
@@ -29,6 +30,7 @@ config ISP1301_OMAP
        tristate "Philips ISP1301 with OMAP OTG"
        depends on I2C && ARCH_OMAP_OTG
        depends on USB
+       depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
        select USB_PHY
        help
          If you say yes here you get support for the Philips ISP1301
@@ -43,7 +45,7 @@ config ISP1301_OMAP
 config KEYSTONE_USB_PHY
        tristate "Keystone USB PHY Driver"
        depends on ARCH_KEYSTONE || COMPILE_TEST
-       select NOP_USB_XCEIV
+       depends on NOP_USB_XCEIV
        help
          Enable this to support Keystone USB phy. This driver provides
          interface to interact with USB 2.0 and USB 3.0 PHY that is part
@@ -51,6 +53,7 @@ config KEYSTONE_USB_PHY
 
 config NOP_USB_XCEIV
        tristate "NOP USB Transceiver Driver"
+       depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, NOP can't be built-in
        select USB_PHY
        help
          This driver is to be used by all the usb transceiver which are either
@@ -63,9 +66,9 @@ config AM335X_CONTROL_USB
 config AM335X_PHY_USB
        tristate "AM335x USB PHY Driver"
        depends on ARM || COMPILE_TEST
+       depends on NOP_USB_XCEIV
        select USB_PHY
        select AM335X_CONTROL_USB
-       select NOP_USB_XCEIV
        select USB_COMMON
        help
          This driver provides PHY support for that phy which part for the
@@ -92,6 +95,7 @@ config TWL6030_USB
 config USB_GPIO_VBUS
        tristate "GPIO based peripheral-only VBUS sensing 'transceiver'"
        depends on GPIOLIB || COMPILE_TEST
+       depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
        select USB_PHY
        help
          Provides simple GPIO VBUS sensing for controllers with an
@@ -112,6 +116,7 @@ config OMAP_OTG
 config TAHVO_USB
        tristate "Tahvo USB transceiver driver"
        depends on MFD_RETU && EXTCON
+       depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
        select USB_PHY
        help
          Enable this to support USB transceiver on Tahvo. This is used
@@ -140,6 +145,7 @@ config USB_ISP1301
 config USB_MSM_OTG
        tristate "Qualcomm on-chip USB OTG controller support"
        depends on (USB || USB_GADGET) && (ARCH_QCOM || COMPILE_TEST)
+       depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
        depends on RESET_CONTROLLER
        depends on EXTCON
        select USB_PHY
@@ -169,6 +175,7 @@ config USB_QCOM_8X16_PHY
 config USB_MV_OTG
        tristate "Marvell USB OTG support"
        depends on USB_EHCI_MV && USB_MV_UDC && PM && USB_OTG
+       depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
        select USB_PHY
        help
          Say Y here if you want to build Marvell USB OTG transciever
index a262a4343f29508c58c6ddcaf36c1ddd467893e8..7e5aece769da1a494b24173d9b3e4bb28f18d468 100644 (file)
@@ -54,7 +54,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
                return am_phy->id;
        }
 
-       am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node);
+       am_phy->dr_mode = of_usb_get_dr_mode_by_phy(pdev->dev.of_node, -1);
 
        ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen, NULL);
        if (ret)
index 72b387d592c278eafb5a73e519482b405d921e39..8a34759727bb8e2cd92ea5b64b8ac64ede7c7b8a 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/module.h>
 #include <linux/device.h>
+#include <linux/extcon.h>
 #include <linux/gpio/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
@@ -35,6 +36,8 @@
 #include <linux/of_device.h>
 #include <linux/reboot.h>
 #include <linux/reset.h>
+#include <linux/types.h>
+#include <linux/usb/otg.h>
 
 #include <linux/usb.h>
 #include <linux/usb/otg.h>
 #include <linux/usb/ulpi.h>
 #include <linux/usb/gadget.h>
 #include <linux/usb/hcd.h>
-#include <linux/usb/msm_hsusb.h>
 #include <linux/usb/msm_hsusb_hw.h>
 #include <linux/regulator/consumer.h>
 
+/**
+ * OTG control
+ *
+ * OTG_NO_CONTROL      Id/VBUS notifications not required. Useful in host
+ *                      only configuration.
+ * OTG_PHY_CONTROL     Id/VBUS notifications comes form USB PHY.
+ * OTG_PMIC_CONTROL    Id/VBUS notifications comes from PMIC hardware.
+ * OTG_USER_CONTROL    Id/VBUS notifcations comes from User via sysfs.
+ *
+ */
+enum otg_control_type {
+       OTG_NO_CONTROL = 0,
+       OTG_PHY_CONTROL,
+       OTG_PMIC_CONTROL,
+       OTG_USER_CONTROL,
+};
+
+/**
+ * PHY used in
+ *
+ * INVALID_PHY                 Unsupported PHY
+ * CI_45NM_INTEGRATED_PHY      Chipidea 45nm integrated PHY
+ * SNPS_28NM_INTEGRATED_PHY    Synopsis 28nm integrated PHY
+ *
+ */
+enum msm_usb_phy_type {
+       INVALID_PHY = 0,
+       CI_45NM_INTEGRATED_PHY,
+       SNPS_28NM_INTEGRATED_PHY,
+};
+
+#define IDEV_CHG_MAX   1500
+#define IUNIT          100
+
+/**
+ * Different states involved in USB charger detection.
+ *
+ * USB_CHG_STATE_UNDEFINED     USB charger is not connected or detection
+ *                              process is not yet started.
+ * USB_CHG_STATE_WAIT_FOR_DCD  Waiting for Data pins contact.
+ * USB_CHG_STATE_DCD_DONE      Data pin contact is detected.
+ * USB_CHG_STATE_PRIMARY_DONE  Primary detection is completed (Detects
+ *                              between SDP and DCP/CDP).
+ * USB_CHG_STATE_SECONDARY_DONE        Secondary detection is completed (Detects
+ *                              between DCP and CDP).
+ * USB_CHG_STATE_DETECTED      USB charger type is determined.
+ *
+ */
+enum usb_chg_state {
+       USB_CHG_STATE_UNDEFINED = 0,
+       USB_CHG_STATE_WAIT_FOR_DCD,
+       USB_CHG_STATE_DCD_DONE,
+       USB_CHG_STATE_PRIMARY_DONE,
+       USB_CHG_STATE_SECONDARY_DONE,
+       USB_CHG_STATE_DETECTED,
+};
+
+/**
+ * USB charger types
+ *
+ * USB_INVALID_CHARGER Invalid USB charger.
+ * USB_SDP_CHARGER     Standard downstream port. Refers to a downstream port
+ *                      on USB2.0 compliant host/hub.
+ * USB_DCP_CHARGER     Dedicated charger port (AC charger/ Wall charger).
+ * USB_CDP_CHARGER     Charging downstream port. Enumeration can happen and
+ *                      IDEV_CHG_MAX can be drawn irrespective of USB state.
+ *
+ */
+enum usb_chg_type {
+       USB_INVALID_CHARGER = 0,
+       USB_SDP_CHARGER,
+       USB_DCP_CHARGER,
+       USB_CDP_CHARGER,
+};
+
+/**
+ * struct msm_otg_platform_data - platform device data
+ *              for msm_otg driver.
+ * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
+ *              "do not overwrite default vaule at this address".
+ * @phy_init_sz: PHY configuration sequence size.
+ * @vbus_power: VBUS power on/off routine.
+ * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
+ * @mode: Supported mode (OTG/peripheral/host).
+ * @otg_control: OTG switch controlled by user/Id pin
+ */
+struct msm_otg_platform_data {
+       int *phy_init_seq;
+       int phy_init_sz;
+       void (*vbus_power)(bool on);
+       unsigned power_budget;
+       enum usb_dr_mode mode;
+       enum otg_control_type otg_control;
+       enum msm_usb_phy_type phy_type;
+       void (*setup_gpio)(enum usb_otg_state state);
+};
+
+/**
+ * struct msm_usb_cable - structure for exteternal connector cable
+ *                       state tracking
+ * @nb: hold event notification callback
+ * @conn: used for notification registration
+ */
+struct msm_usb_cable {
+       struct notifier_block           nb;
+       struct extcon_dev               *extcon;
+};
+
+/**
+ * struct msm_otg: OTG driver data. Shared by HCD and DCD.
+ * @otg: USB OTG Transceiver structure.
+ * @pdata: otg device platform data.
+ * @irq: IRQ number assigned for HSUSB controller.
+ * @clk: clock struct of usb_hs_clk.
+ * @pclk: clock struct of usb_hs_pclk.
+ * @core_clk: clock struct of usb_hs_core_clk.
+ * @regs: ioremapped register base address.
+ * @inputs: OTG state machine inputs(Id, SessValid etc).
+ * @sm_work: OTG state machine work.
+ * @in_lpm: indicates low power mode (LPM) state.
+ * @async_int: Async interrupt arrived.
+ * @cur_power: The amount of mA available from downstream port.
+ * @chg_work: Charger detection work.
+ * @chg_state: The state of charger detection process.
+ * @chg_type: The type of charger attached.
+ * @dcd_retires: The retry count used to track Data contact
+ *               detection process.
+ * @manual_pullup: true if VBUS is not routed to USB controller/phy
+ *     and controller driver therefore enables pull-up explicitly before
+ *     starting controller using usbcmd run/stop bit.
+ * @vbus: VBUS signal state trakining, using extcon framework
+ * @id: ID signal state trakining, using extcon framework
+ * @switch_gpio: Descriptor for GPIO used to control external Dual
+ *               SPDT USB Switch.
+ * @reboot: Used to inform the driver to route USB D+/D- line to Device
+ *         connector
+ */
+struct msm_otg {
+       struct usb_phy phy;
+       struct msm_otg_platform_data *pdata;
+       int irq;
+       struct clk *clk;
+       struct clk *pclk;
+       struct clk *core_clk;
+       void __iomem *regs;
+#define ID             0
+#define B_SESS_VLD     1
+       unsigned long inputs;
+       struct work_struct sm_work;
+       atomic_t in_lpm;
+       int async_int;
+       unsigned cur_power;
+       int phy_number;
+       struct delayed_work chg_work;
+       enum usb_chg_state chg_state;
+       enum usb_chg_type chg_type;
+       u8 dcd_retries;
+       struct regulator *v3p3;
+       struct regulator *v1p8;
+       struct regulator *vddcx;
+
+       struct reset_control *phy_rst;
+       struct reset_control *link_rst;
+       int vdd_levels[3];
+
+       bool manual_pullup;
+
+       struct msm_usb_cable vbus;
+       struct msm_usb_cable id;
+
+       struct gpio_desc *switch_gpio;
+       struct notifier_block reboot;
+};
+
 #define MSM_USB_BASE   (motg->regs)
 #define DRIVER_NAME    "msm_otg"
 
index c4bf2de6d14ec3a47259f06977e27ea1f3329ed1..6f6d2a7fd5a079149c6587c709b473582b3e5458 100644 (file)
@@ -148,7 +148,7 @@ static int omap_otg_remove(struct platform_device *pdev)
        struct otg_device *otg_dev = platform_get_drvdata(pdev);
        struct extcon_dev *edev = otg_dev->extcon;
 
-       extcon_unregister_notifier(edev, EXTCON_USB_HOST,&otg_dev->id_nb);
+       extcon_unregister_notifier(edev, EXTCON_USB_HOST, &otg_dev->id_nb);
        extcon_unregister_notifier(edev, EXTCON_USB, &otg_dev->vbus_nb);
 
        return 0;
index baeb7d23bf2467b6a0a2a6780b0bd3e07bb168e1..8fbbc2d32371a973f4f3bd166c302cf0ece29267 100644 (file)
@@ -697,7 +697,7 @@ probe_end_fifo_exit:
 probe_end_pipe_exit:
        usbhs_pipe_remove(priv);
 
-       dev_info(&pdev->dev, "probe failed\n");
+       dev_info(&pdev->dev, "probe failed (%d)\n", ret);
 
        return ret;
 }
index 7be4e7d57aced9b17c6b5d0fb7877059a280ec53..280ed5ff021bdb038f920562a0ccbed78e6ccff0 100644 (file)
@@ -810,20 +810,27 @@ static void xfer_work(struct work_struct *work)
 {
        struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
        struct usbhs_pipe *pipe = pkt->pipe;
-       struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
+       struct usbhs_fifo *fifo;
        struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
        struct dma_async_tx_descriptor *desc;
-       struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
+       struct dma_chan *chan;
        struct device *dev = usbhs_priv_to_dev(priv);
        enum dma_transfer_direction dir;
+       unsigned long flags;
 
+       usbhs_lock(priv, flags);
+       fifo = usbhs_pipe_to_fifo(pipe);
+       if (!fifo)
+               goto xfer_work_end;
+
+       chan = usbhsf_dma_chan_get(fifo, pkt);
        dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 
        desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
                                        pkt->trans, dir,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
-               return;
+               goto xfer_work_end;
 
        desc->callback          = usbhsf_dma_complete;
        desc->callback_param    = pipe;
@@ -831,7 +838,7 @@ static void xfer_work(struct work_struct *work)
        pkt->cookie = dmaengine_submit(desc);
        if (pkt->cookie < 0) {
                dev_err(dev, "Failed to submit dma descriptor\n");
-               return;
+               goto xfer_work_end;
        }
 
        dev_dbg(dev, "  %s %d (%d/ %d)\n",
@@ -842,6 +849,9 @@ static void xfer_work(struct work_struct *work)
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
        dma_async_issue_pending(chan);
        usbhs_pipe_enable(pipe);
+
+xfer_work_end:
+       usbhs_unlock(priv, flags);
 }
 
 /*
index 30345c2d01be526aa4e8080b15c5cfc5ebdc8d3b..50f3363cc382b8eaebfa6d1083f8f44c8c734b1d 100644 (file)
@@ -585,6 +585,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
        struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
        struct usbhs_pipe *pipe;
        int ret = -EIO;
+       unsigned long flags;
+
+       usbhs_lock(priv, flags);
 
        /*
         * if it already have pipe,
@@ -593,7 +596,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
        if (uep->pipe) {
                usbhs_pipe_clear(uep->pipe);
                usbhs_pipe_sequence_data0(uep->pipe);
-               return 0;
+               ret = 0;
+               goto usbhsg_ep_enable_end;
        }
 
        pipe = usbhs_pipe_malloc(priv,
@@ -621,6 +625,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep,
                ret = 0;
        }
 
+usbhsg_ep_enable_end:
+       usbhs_unlock(priv, flags);
+
        return ret;
 }
 
index 38b01f2aeeb0d408f6e41340355273536f0ab713..1d70add926f0ff632964ca92fc92a3f2f035fb7b 100644 (file)
@@ -23,7 +23,7 @@
 #define UGCTRL2_RESERVED_3     0x00000001      /* bit[3:0] should be B'0001 */
 #define UGCTRL2_USB0SEL_OTG    0x00000030
 
-void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
+static void usbhs_write32(struct usbhs_priv *priv, u32 reg, u32 data)
 {
        iowrite32(data, priv->base + reg);
 }
index c7508cbce3ce8b519edb5ad7cdbe5713e0d20196..9f490375ac92362dce2174fb246a2e1425dd25b7 100644 (file)
@@ -245,7 +245,7 @@ enum usbip_side {
 #define USBIP_EH_RESET         (1 << 2)
 #define USBIP_EH_UNUSABLE      (1 << 3)
 
-#define SDEV_EVENT_REMOVED   (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
+#define        SDEV_EVENT_REMOVED      (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
 #define        SDEV_EVENT_DOWN         (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
 #define        SDEV_EVENT_ERROR_TCP    (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
 #define        SDEV_EVENT_ERROR_SUBMIT (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
index 99397fa1e3f0eb7400a927157cb3e1eae6a936b4..0f98f2c7475f5d95a0eff25b3bf8e5db5408f4cf 100644 (file)
@@ -40,7 +40,7 @@ int get_gadget_descs(struct vudc *udc)
        struct usb_ctrlrequest req;
        int ret;
 
-       if (!udc || !udc->driver || !udc->pullup)
+       if (!udc->driver || !udc->pullup)
                return -EINVAL;
 
        req.bRequestType = USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
index 0efc52f11ad003ae73063fa12ca2dd7bef86ddbe..9269d568523973a007c559cc0ea8491968f161be 100644 (file)
@@ -64,14 +64,11 @@ const struct consw dummy_con = {
     .con_putcs =       DUMMY,
     .con_cursor =      DUMMY,
     .con_scroll =      DUMMY,
-    .con_bmove =       DUMMY,
     .con_switch =      DUMMY,
     .con_blank =       DUMMY,
     .con_font_set =    DUMMY,
     .con_font_get =    DUMMY,
     .con_font_default =        DUMMY,
     .con_font_copy =   DUMMY,
-    .con_set_palette = DUMMY,
-    .con_scrolldelta = DUMMY,
 };
 EXPORT_SYMBOL_GPL(dummy_con);
index afd3301ac40cc5c0ebbb093140a7a813c69ab391..b87f5cfdaea5cb364fc1043f9df4db835ca2cf5a 100644 (file)
@@ -170,8 +170,7 @@ static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
                        int height, int width);
 static int fbcon_switch(struct vc_data *vc);
 static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
-static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
-static int fbcon_scrolldelta(struct vc_data *vc, int lines);
+static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
 
 /*
  *  Internal routines
@@ -381,7 +380,7 @@ static void fb_flashcursor(struct work_struct *work)
        if (ops && ops->currcon != -1)
                vc = vc_cons[ops->currcon].d;
 
-       if (!vc || !CON_IS_VISIBLE(vc) ||
+       if (!vc || !con_is_visible(vc) ||
            registered_fb[con2fb_map[vc->vc_num]] != info ||
            vc->vc_deccm != 1) {
                console_unlock();
@@ -619,7 +618,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
                    erase,
                    vc->vc_size_row * logo_lines);
 
-       if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) {
+       if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
                fbcon_clear_margins(vc, 0);
                update_screen(vc);
        }
@@ -1113,7 +1112,7 @@ static void fbcon_init(struct vc_data *vc, int init)
         *
         * We need to do it in fbcon_init() to prevent screen corruption.
         */
-       if (CON_IS_VISIBLE(vc) && vc->vc_mode == KD_TEXT) {
+       if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
                if (info->fbops->fb_set_par &&
                    !(ops->flags & FBCON_FLAGS_INIT)) {
                        ret = info->fbops->fb_set_par(info);
@@ -1193,7 +1192,7 @@ static void fbcon_deinit(struct vc_data *vc)
        if (!ops)
                goto finished;
 
-       if (CON_IS_VISIBLE(vc))
+       if (con_is_visible(vc))
                fbcon_del_cursor_timer(info);
 
        ops->flags &= ~FBCON_FLAGS_INIT;
@@ -1398,7 +1397,7 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
        rows /= vc->vc_font.height;
        vc_resize(vc, cols, rows);
 
-       if (CON_IS_VISIBLE(vc)) {
+       if (con_is_visible(vc)) {
                update_screen(vc);
                if (softback_buf)
                        fbcon_update_softback(vc);
@@ -2146,7 +2145,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
                        return -EINVAL;
 
                DPRINTK("resize now %ix%i\n", var.xres, var.yres);
-               if (CON_IS_VISIBLE(vc)) {
+               if (con_is_visible(vc)) {
                        var.activate = FB_ACTIVATE_NOW |
                                FB_ACTIVATE_FORCE;
                        fb_set_var(info, &var);
@@ -2449,7 +2448,7 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
        int cnt;
        char *old_data = NULL;
 
-       if (CON_IS_VISIBLE(vc) && softback_lines)
+       if (con_is_visible(vc) && softback_lines)
                fbcon_set_origin(vc);
 
        resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
@@ -2530,9 +2529,9 @@ static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
                cols /= w;
                rows /= h;
                vc_resize(vc, cols, rows);
-               if (CON_IS_VISIBLE(vc) && softback_buf)
+               if (con_is_visible(vc) && softback_buf)
                        fbcon_update_softback(vc);
-       } else if (CON_IS_VISIBLE(vc)
+       } else if (con_is_visible(vc)
                   && vc->vc_mode == KD_TEXT) {
                fbcon_clear_margins(vc, 0);
                update_screen(vc);
@@ -2652,17 +2651,17 @@ static struct fb_cmap palette_cmap = {
        0, 16, palette_red, palette_green, palette_blue, NULL
 };
 
-static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
+static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
 {
        struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
        int i, j, k, depth;
        u8 val;
 
        if (fbcon_is_inactive(vc, info))
-               return -EINVAL;
+               return;
 
-       if (!CON_IS_VISIBLE(vc))
-               return 0;
+       if (!con_is_visible(vc))
+               return;
 
        depth = fb_get_color_depth(&info->var, &info->fix);
        if (depth > 3) {
@@ -2684,7 +2683,7 @@ static int fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
        } else
                fb_copy_cmap(fb_default_cmap(1 << depth), &palette_cmap);
 
-       return fb_set_cmap(&palette_cmap, info);
+       fb_set_cmap(&palette_cmap, info);
 }
 
 static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
@@ -2765,7 +2764,7 @@ static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
        }
 }
 
-static int fbcon_scrolldelta(struct vc_data *vc, int lines)
+static void fbcon_scrolldelta(struct vc_data *vc, int lines)
 {
        struct fb_info *info = registered_fb[con2fb_map[fg_console]];
        struct fbcon_ops *ops = info->fbcon_par;
@@ -2774,9 +2773,9 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
 
        if (softback_top) {
                if (vc->vc_num != fg_console)
-                       return 0;
+                       return;
                if (vc->vc_mode != KD_TEXT || !lines)
-                       return 0;
+                       return;
                if (logo_shown >= 0) {
                        struct vc_data *conp2 = vc_cons[logo_shown].d;
 
@@ -2809,11 +2808,11 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
                fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
                fbcon_redraw_softback(vc, disp, lines);
                fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
-               return 0;
+               return;
        }
 
        if (!scrollback_phys_max)
-               return -ENOSYS;
+               return;
 
        scrollback_old = scrollback_current;
        scrollback_current -= lines;
@@ -2822,10 +2821,10 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
        else if (scrollback_current > scrollback_max)
                scrollback_current = scrollback_max;
        if (scrollback_current == scrollback_old)
-               return 0;
+               return;
 
        if (fbcon_is_inactive(vc, info))
-               return 0;
+               return;
 
        fbcon_cursor(vc, CM_ERASE);
 
@@ -2852,7 +2851,6 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
 
        if (!scrollback_current)
                fbcon_cursor(vc, CM_DRAW);
-       return 0;
 }
 
 static int fbcon_set_origin(struct vc_data *vc)
@@ -2904,7 +2902,7 @@ static void fbcon_modechanged(struct fb_info *info)
        p = &fb_display[vc->vc_num];
        set_blitting_type(vc, info);
 
-       if (CON_IS_VISIBLE(vc)) {
+       if (con_is_visible(vc)) {
                var_to_display(p, &info->var, info);
                cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
                rows = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
@@ -2943,7 +2941,7 @@ static void fbcon_set_all_vcs(struct fb_info *info)
                    registered_fb[con2fb_map[i]] != info)
                        continue;
 
-               if (CON_IS_VISIBLE(vc)) {
+               if (con_is_visible(vc)) {
                        fg = i;
                        continue;
                }
@@ -3182,7 +3180,7 @@ static void fbcon_fb_blanked(struct fb_info *info, int blank)
                        registered_fb[con2fb_map[ops->currcon]] != info)
                return;
 
-       if (CON_IS_VISIBLE(vc)) {
+       if (con_is_visible(vc)) {
                if (blank)
                        do_blank_screen(0);
                else
@@ -3336,7 +3334,6 @@ static const struct consw fb_con = {
        .con_putcs              = fbcon_putcs,
        .con_cursor             = fbcon_cursor,
        .con_scroll             = fbcon_scroll,
-       .con_bmove              = fbcon_bmove,
        .con_switch             = fbcon_switch,
        .con_blank              = fbcon_blank,
        .con_font_set           = fbcon_set_font,
index 8edc062536a80e9e50ceb2ae063469c729b493c3..bacbb044d77cd8dbeb3d6cf0e308796287ff854d 100644 (file)
@@ -444,48 +444,11 @@ static void mdacon_clear(struct vc_data *c, int y, int x,
        }
 }
                         
-static void mdacon_bmove(struct vc_data *c, int sy, int sx, 
-                        int dy, int dx, int height, int width)
-{
-       u16 *src, *dest;
-
-       if (width <= 0 || height <= 0)
-               return;
-               
-       if (sx==0 && dx==0 && width==mda_num_columns) {
-               scr_memmovew(MDA_ADDR(0,dy), MDA_ADDR(0,sy), height*width*2);
-
-       } else if (dy < sy || (dy == sy && dx < sx)) {
-               src  = MDA_ADDR(sx, sy);
-               dest = MDA_ADDR(dx, dy);
-
-               for (; height > 0; height--) {
-                       scr_memmovew(dest, src, width*2);
-                       src  += mda_num_columns;
-                       dest += mda_num_columns;
-               }
-       } else {
-               src  = MDA_ADDR(sx, sy+height-1);
-               dest = MDA_ADDR(dx, dy+height-1);
-
-               for (; height > 0; height--) {
-                       scr_memmovew(dest, src, width*2);
-                       src  -= mda_num_columns;
-                       dest -= mda_num_columns;
-               }
-       }
-}
-
 static int mdacon_switch(struct vc_data *c)
 {
        return 1;       /* redrawing needed */
 }
 
-static int mdacon_set_palette(struct vc_data *c, const unsigned char *table)
-{
-       return -EINVAL;
-}
-
 static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
 {
        if (mda_type == TYPE_MDA) {
@@ -505,11 +468,6 @@ static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
        }
 }
 
-static int mdacon_scrolldelta(struct vc_data *c, int lines)
-{
-       return 0;
-}
-
 static void mdacon_cursor(struct vc_data *c, int mode)
 {
        if (mode == CM_ERASE) {
@@ -574,11 +532,8 @@ static const struct consw mda_con = {
        .con_putcs =            mdacon_putcs,
        .con_cursor =           mdacon_cursor,
        .con_scroll =           mdacon_scroll,
-       .con_bmove =            mdacon_bmove,
        .con_switch =           mdacon_switch,
        .con_blank =            mdacon_blank,
-       .con_set_palette =      mdacon_set_palette,
-       .con_scrolldelta =      mdacon_scrolldelta,
        .con_build_attr =       mdacon_build_attr,
        .con_invert_region =    mdacon_invert_region,
 };
index 0553dfe684ef96769ff712513e1f7733235f5833..e3b9521e4ec3e884f75ee7690b8abd4f90cc5eba 100644 (file)
@@ -574,17 +574,6 @@ static int newport_font_set(struct vc_data *vc, struct console_font *font, unsig
        return newport_set_font(vc->vc_num, font);
 }
 
-static int newport_set_palette(struct vc_data *vc, const unsigned char *table)
-{
-       return -EINVAL;
-}
-
-static int newport_scrolldelta(struct vc_data *vc, int lines)
-{
-       /* there is (nearly) no off-screen memory, so we can't scroll back */
-       return 0;
-}
-
 static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
                          int lines)
 {
@@ -684,34 +673,6 @@ static int newport_scroll(struct vc_data *vc, int t, int b, int dir,
        return 1;
 }
 
-static void newport_bmove(struct vc_data *vc, int sy, int sx, int dy,
-                         int dx, int h, int w)
-{
-       short xs, ys, xe, ye, xoffs, yoffs;
-
-       xs = sx << 3;
-       xe = ((sx + w) << 3) - 1;
-       /*
-        * as bmove is only used to move stuff around in the same line
-        * (h == 1), we don't care about wrap arounds caused by topscan != 0
-        */
-       ys = ((sy << 4) + topscan) & 0x3ff;
-       ye = (((sy + h) << 4) - 1 + topscan) & 0x3ff;
-       xoffs = (dx - sx) << 3;
-       yoffs = (dy - sy) << 4;
-       if (xoffs > 0) {
-               /* move to the right, exchange starting points */
-               swap(xe, xs);
-       }
-       newport_wait(npregs);
-       npregs->set.drawmode0 = (NPORT_DMODE0_S2S | NPORT_DMODE0_BLOCK |
-                                NPORT_DMODE0_DOSETUP | NPORT_DMODE0_STOPX
-                                | NPORT_DMODE0_STOPY);
-       npregs->set.xystarti = (xs << 16) | ys;
-       npregs->set.xyendi = (xe << 16) | ye;
-       npregs->go.xymove = (xoffs << 16) | yoffs;
-}
-
 static int newport_dummy(struct vc_data *c)
 {
        return 0;
@@ -729,13 +690,10 @@ const struct consw newport_con = {
        .con_putcs        = newport_putcs,
        .con_cursor       = newport_cursor,
        .con_scroll       = newport_scroll,
-       .con_bmove        = newport_bmove,
        .con_switch       = newport_switch,
        .con_blank        = newport_blank,
        .con_font_set     = newport_font_set,
        .con_font_default = newport_font_default,
-       .con_set_palette  = newport_set_palette,
-       .con_scrolldelta  = newport_scrolldelta,
        .con_set_origin   = DUMMY,
        .con_save_screen  = DUMMY
 };
index e440c2d9fe7cf51898bff9345c0dbf95881d8e6d..3a10ac19598faf8306be2954891e1b100df8cd77 100644 (file)
@@ -79,11 +79,6 @@ static const char *sticon_startup(void)
     return "STI console";
 }
 
-static int sticon_set_palette(struct vc_data *c, const unsigned char *table)
-{
-    return -EINVAL;
-}
-
 static void sticon_putc(struct vc_data *conp, int c, int ypos, int xpos)
 {
     int redraw_cursor = 0;
@@ -182,22 +177,6 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
     return 0;
 }
 
-static void sticon_bmove(struct vc_data *conp, int sy, int sx, 
-       int dy, int dx, int height, int width)
-{
-    if (!width || !height)
-           return;
-#if 0
-    if (((sy <= p->cursor_y) && (p->cursor_y < sy+height) &&
-       (sx <= p->cursor_x) && (p->cursor_x < sx+width)) ||
-       ((dy <= p->cursor_y) && (p->cursor_y < dy+height) &&
-       (dx <= p->cursor_x) && (p->cursor_x < dx+width)))
-               sticon_cursor(p, CM_ERASE /*|CM_SOFTBACK*/);
-#endif
-
-    sti_bmove(sticon_sti, sy, sx, dy, dx, height, width);
-}
-
 static void sticon_init(struct vc_data *c, int init)
 {
     struct sti_struct *sti = sticon_sti;
@@ -256,11 +235,6 @@ static int sticon_blank(struct vc_data *c, int blank, int mode_switch)
     return 1;
 }
 
-static int sticon_scrolldelta(struct vc_data *conp, int lines)
-{
-    return 0;
-}
-
 static u16 *sticon_screen_pos(struct vc_data *conp, int offset)
 {
     int line;
@@ -355,11 +329,8 @@ static const struct consw sti_con = {
        .con_putcs              = sticon_putcs,
        .con_cursor             = sticon_cursor,
        .con_scroll             = sticon_scroll,
-       .con_bmove              = sticon_bmove,
        .con_switch             = sticon_switch,
        .con_blank              = sticon_blank,
-       .con_set_palette        = sticon_set_palette,
-       .con_scrolldelta        = sticon_scrolldelta,
        .con_set_origin         = sticon_set_origin,
        .con_save_screen        = sticon_save_screen, 
        .con_build_attr         = sticon_build_attr,
index 8bf911002cbad18f9f008a0eaa125842c0002a76..11576611a974bae39a356f21ae7ed3b5d5e0a613 100644 (file)
@@ -80,7 +80,7 @@ static void vgacon_deinit(struct vc_data *c);
 static void vgacon_cursor(struct vc_data *c, int mode);
 static int vgacon_switch(struct vc_data *c);
 static int vgacon_blank(struct vc_data *c, int blank, int mode_switch);
-static int vgacon_scrolldelta(struct vc_data *c, int lines);
+static void vgacon_scrolldelta(struct vc_data *c, int lines);
 static int vgacon_set_origin(struct vc_data *c);
 static void vgacon_save_screen(struct vc_data *c);
 static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
@@ -248,18 +248,18 @@ static void vgacon_restore_screen(struct vc_data *c)
        }
 }
 
-static int vgacon_scrolldelta(struct vc_data *c, int lines)
+static void vgacon_scrolldelta(struct vc_data *c, int lines)
 {
        int start, end, count, soff;
 
        if (!lines) {
                c->vc_visible_origin = c->vc_origin;
                vga_set_mem_top(c);
-               return 1;
+               return;
        }
 
        if (!vgacon_scrollback)
-               return 1;
+               return;
 
        if (!vgacon_scrollback_save) {
                vgacon_cursor(c, CM_ERASE);
@@ -320,8 +320,6 @@ static int vgacon_scrolldelta(struct vc_data *c, int lines)
                        scr_memcpyw(d, s, diff * c->vc_size_row);
        } else
                vgacon_cursor(c, CM_MOVE);
-
-       return 1;
 }
 #else
 #define vgacon_scrollback_startup(...) do { } while (0)
@@ -334,7 +332,7 @@ static void vgacon_restore_screen(struct vc_data *c)
                vgacon_scrolldelta(c, 0);
 }
 
-static int vgacon_scrolldelta(struct vc_data *c, int lines)
+static void vgacon_scrolldelta(struct vc_data *c, int lines)
 {
        if (!lines)             /* Turn scrollback off */
                c->vc_visible_origin = c->vc_origin;
@@ -362,7 +360,6 @@ static int vgacon_scrolldelta(struct vc_data *c, int lines)
                c->vc_visible_origin = vga_vram_base + (p + ul) % we;
        }
        vga_set_mem_top(c);
-       return 1;
 }
 #endif /* CONFIG_VGACON_SOFT_SCROLLBACK */
 
@@ -592,7 +589,7 @@ static void vgacon_init(struct vc_data *c, int init)
 static void vgacon_deinit(struct vc_data *c)
 {
        /* When closing the active console, reset video origin */
-       if (CON_IS_VISIBLE(c)) {
+       if (con_is_visible(c)) {
                c->vc_visible_origin = vga_vram_base;
                vga_set_mem_top(c);
        }
@@ -859,16 +856,13 @@ static void vga_set_palette(struct vc_data *vc, const unsigned char *table)
        }
 }
 
-static int vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
+static void vgacon_set_palette(struct vc_data *vc, const unsigned char *table)
 {
 #ifdef CAN_LOAD_PALETTE
        if (vga_video_type != VIDEO_TYPE_VGAC || vga_palette_blanked
-           || !CON_IS_VISIBLE(vc))
-               return -EINVAL;
+           || !con_is_visible(vc))
+               return;
        vga_set_palette(vc, table);
-       return 0;
-#else
-       return -EINVAL;
 #endif
 }
 
@@ -1254,7 +1248,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
                struct vc_data *c = vc_cons[i].d;
 
                if (c && c->vc_sw == &vga_con) {
-                       if (CON_IS_VISIBLE(c)) {
+                       if (con_is_visible(c)) {
                                /* void size to cause regs to be rewritten */
                                cursor_size_lastfrom = 0;
                                cursor_size_lastto = 0;
@@ -1318,7 +1312,7 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
                   return success */
                return (user) ? 0 : -EINVAL;
 
-       if (CON_IS_VISIBLE(c) && !vga_is_gfx) /* who knows */
+       if (con_is_visible(c) && !vga_is_gfx) /* who knows */
                vgacon_doresize(c, width, height);
        return 0;
 }
@@ -1427,7 +1421,6 @@ const struct consw vga_con = {
        .con_putcs = DUMMY,
        .con_cursor = vgacon_cursor,
        .con_scroll = vgacon_scroll,
-       .con_bmove = DUMMY,
        .con_switch = vgacon_switch,
        .con_blank = vgacon_blank,
        .con_font_set = vgacon_font_set,
index 076970a54f894b80366da951f220a97203466bb7..4ce10bcca18b1f600c351675142240dbe94a4022 100644 (file)
@@ -423,36 +423,7 @@ upload:
 
        return 0;
 }
-static int __init check_prereq(void)
-{
-       struct cpuinfo_x86 *c = &cpu_data(0);
-
-       if (!xen_initial_domain())
-               return -ENODEV;
-
-       if (!acpi_gbl_FADT.smi_command)
-               return -ENODEV;
-
-       if (c->x86_vendor == X86_VENDOR_INTEL) {
-               if (!cpu_has(c, X86_FEATURE_EST))
-                       return -ENODEV;
 
-               return 0;
-       }
-       if (c->x86_vendor == X86_VENDOR_AMD) {
-               /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
-                * as we get compile warnings for the static functions.
-                */
-#define CPUID_FREQ_VOLT_CAPABILITIES    0x80000007
-#define USE_HW_PSTATE                   0x00000080
-               u32 eax, ebx, ecx, edx;
-               cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
-               if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
-                       return -ENODEV;
-               return 0;
-       }
-       return -ENODEV;
-}
 /* acpi_perf_data is a pointer to percpu data. */
 static struct acpi_processor_performance __percpu *acpi_perf_data;
 
@@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
 static int __init xen_acpi_processor_init(void)
 {
        unsigned int i;
-       int rc = check_prereq();
+       int rc;
 
-       if (rc)
-               return rc;
+       if (!xen_initial_domain())
+               return -ENODEV;
 
        nr_acpi_bits = get_max_acpi_id() + 1;
        acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
index cacf30d14747baa20d5f2d8a6a999ebdcba14018..7487971f9f788b12a637216c29cc853bb933c1ca 100644 (file)
@@ -316,11 +316,18 @@ static int xenbus_write_transaction(unsigned msg_type,
                        rc = -ENOMEM;
                        goto out;
                }
+       } else {
+               list_for_each_entry(trans, &u->transactions, list)
+                       if (trans->handle.id == u->u.msg.tx_id)
+                               break;
+               if (&trans->list == &u->transactions)
+                       return -ESRCH;
        }
 
        reply = xenbus_dev_request_and_reply(&u->u.msg);
        if (IS_ERR(reply)) {
-               kfree(trans);
+               if (msg_type == XS_TRANSACTION_START)
+                       kfree(trans);
                rc = PTR_ERR(reply);
                goto out;
        }
@@ -333,12 +340,7 @@ static int xenbus_write_transaction(unsigned msg_type,
                        list_add(&trans->list, &u->transactions);
                }
        } else if (u->u.msg.type == XS_TRANSACTION_END) {
-               list_for_each_entry(trans, &u->transactions, list)
-                       if (trans->handle.id == u->u.msg.tx_id)
-                               break;
-               BUG_ON(&trans->list == &u->transactions);
                list_del(&trans->list);
-
                kfree(trans);
        }
 
index 374b12af88127c2aef2f359ef3f13013ec548fd1..22f7cd711c5792e25eac035f0aa138c9992d9bd9 100644 (file)
@@ -232,10 +232,10 @@ static void transaction_resume(void)
 void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
 {
        void *ret;
-       struct xsd_sockmsg req_msg = *msg;
+       enum xsd_sockmsg_type type = msg->type;
        int err;
 
-       if (req_msg.type == XS_TRANSACTION_START)
+       if (type == XS_TRANSACTION_START)
                transaction_start();
 
        mutex_lock(&xs_state.request_mutex);
@@ -249,12 +249,8 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg)
 
        mutex_unlock(&xs_state.request_mutex);
 
-       if (IS_ERR(ret))
-               return ret;
-
        if ((msg->type == XS_TRANSACTION_END) ||
-           ((req_msg.type == XS_TRANSACTION_START) &&
-            (msg->type == XS_ERROR)))
+           ((type == XS_TRANSACTION_START) && (msg->type == XS_ERROR)))
                transaction_end();
 
        return ret;
index f4645c51526290e0d22a1e2589dbe21bf8e7148b..e2e7c749925ad237c57758ed18186f5bed7d497b 100644 (file)
@@ -853,7 +853,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
        struct p9_fid *fid, *inode_fid;
        struct dentry *res = NULL;
 
-       if (d_unhashed(dentry)) {
+       if (d_in_lookup(dentry)) {
                res = v9fs_vfs_lookup(dir, dentry, 0);
                if (IS_ERR(res))
                        return PTR_ERR(res);
index a34702c998f593f60515d72fcf093cd556f5d951..1b51eaa5e2dd05445d0ba3e9c33038bcfbe4d277 100644 (file)
@@ -254,7 +254,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
        struct posix_acl *pacl = NULL, *dacl = NULL;
        struct dentry *res = NULL;
 
-       if (d_unhashed(dentry)) {
+       if (d_in_lookup(dentry)) {
                res = v9fs_vfs_lookup(dir, dentry, 0);
                if (IS_ERR(res))
                        return PTR_ERR(res);
index 754813a6962bc324c476b5d5035f10e761c1e28c..6c15012a75d9c6a986710961c07f23fd1dec2b46 100644 (file)
@@ -1687,7 +1687,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
  * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
  * causes the writes to be flagged as synchronous writes.
  */
-static int __block_write_full_page(struct inode *inode, struct page *page,
+int __block_write_full_page(struct inode *inode, struct page *page,
                        get_block_t *get_block, struct writeback_control *wbc,
                        bh_end_io_t *handler)
 {
@@ -1848,6 +1848,7 @@ recover:
        unlock_page(page);
        goto done;
 }
+EXPORT_SYMBOL(__block_write_full_page);
 
 /*
  * If a page has any new buffers, zero them out here, and mark them uptodate
index ce2f5795e44bc73d404817a884cd5ecb0664802d..0daaf7ceedc55f3769abb441a2477ea9fe4f0f75 100644 (file)
@@ -394,7 +394,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
        if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
 
-       if (d_unhashed(dentry)) {
+       if (d_in_lookup(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
index 687471dc04a0a515f4a8dc4953297e6156880b64..6edd825231c5d3d323d2e58cd2ac97189ab61470 100644 (file)
@@ -92,7 +92,7 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
                }
 
                if (i < CHRDEV_MAJOR_DYN_END)
-                       pr_warn("CHRDEV \"%s\" major number %d goes below the dynamic allocation range",
+                       pr_warn("CHRDEV \"%s\" major number %d goes below the dynamic allocation range\n",
                                name, i);
 
                if (i == 0) {
index c3eb998a99bd18a2ed9b7b843c99be15fedab9df..fb0903fffc22c8738c34f958beb7784400a93339 100644 (file)
@@ -445,7 +445,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
                 * Check for hashed negative dentry. We have already revalidated
                 * the dentry and it is fine. No need to perform another lookup.
                 */
-               if (!d_unhashed(direntry))
+               if (!d_in_lookup(direntry))
                        return -ENOENT;
 
                res = cifs_lookup(inode, direntry, 0);
index 33b7ee34eda5f135fef480f0bdf55bb4037ab334..bbc1252a59f5f1431ee779e16780f19a5f47a5d8 100644 (file)
@@ -357,8 +357,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
 
        len = simple_write_to_buffer(buffer->bin_buffer,
                        buffer->bin_buffer_size, ppos, buf, count);
-       if (len > 0)
-               *ppos += len;
 out:
        mutex_unlock(&buffer->mutex);
        return len;
index 0d8eb3455b34d68cde59a48b5b46da9b67498445..e5e29f8c920b18bc6959cdb16d2ee57fec7d45a6 100644 (file)
@@ -45,7 +45,7 @@
  * ecryptfs_to_hex
  * @dst: Buffer to take hex character representation of contents of
  *       src; must be at least of size (src_size * 2)
- * @src: Buffer to be converted to a hex string respresentation
+ * @src: Buffer to be converted to a hex string representation
  * @src_size: number of bytes to convert
  */
 void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
@@ -60,7 +60,7 @@ void ecryptfs_to_hex(char *dst, char *src, size_t src_size)
  * ecryptfs_from_hex
  * @dst: Buffer to take the bytes from src hex; must be at least of
  *       size (src_size / 2)
- * @src: Buffer to be converted from a hex string respresentation to raw value
+ * @src: Buffer to be converted from a hex string representation to raw value
  * @dst_size: size of dst buffer, or number of hex characters pairs to convert
  */
 void ecryptfs_from_hex(char *dst, char *src, int dst_size)
@@ -953,7 +953,7 @@ struct ecryptfs_cipher_code_str_map_elem {
 };
 
 /* Add support for additional ciphers by adding elements here. The
- * cipher_code is whatever OpenPGP applicatoins use to identify the
+ * cipher_code is whatever OpenPGP applications use to identify the
  * ciphers. List in order of probability. */
 static struct ecryptfs_cipher_code_str_map_elem
 ecryptfs_cipher_code_str_map[] = {
@@ -1410,7 +1410,7 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
  *
  * Common entry point for reading file metadata. From here, we could
  * retrieve the header information from the header region of the file,
- * the xattr region of the file, or some other repostory that is
+ * the xattr region of the file, or some other repository that is
  * stored separately from the file itself. The current implementation
  * supports retrieving the metadata information from the file contents
  * and from the xattr region.
index 7000b96b783ef04a56f056a83df595c8093533aa..ca4e83750214adc2b0ea77358937c167d02d6ba7 100644 (file)
@@ -169,9 +169,22 @@ out:
        return rc;
 }
 
+static int ecryptfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct file *lower_file = ecryptfs_file_to_lower(file);
+       /*
+        * Don't allow mmap on top of file systems that don't support it
+        * natively.  If FILESYSTEM_MAX_STACK_DEPTH > 2 or ecryptfs
+        * allows recursive mounting, this will need to be extended.
+        */
+       if (!lower_file->f_op->mmap)
+               return -ENODEV;
+       return generic_file_mmap(file, vma);
+}
+
 /**
  * ecryptfs_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
  * @file: Structure to return filled in
  *
  * Opens the file specified by inode.
@@ -240,7 +253,7 @@ out:
 
 /**
  * ecryptfs_dir_open
- * @inode: inode speciying file to open
+ * @inode: inode specifying file to open
  * @file: Structure to return filled in
  *
  * Opens the file specified by inode.
@@ -403,7 +416,7 @@ const struct file_operations ecryptfs_main_fops = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-       .mmap = generic_file_mmap,
+       .mmap = ecryptfs_mmap,
        .open = ecryptfs_open,
        .flush = ecryptfs_flush,
        .release = ecryptfs_release,
index e818f5ac7a2692b6bb5c1a132d7bbfd967ad956a..866bb18efefea9953250ba1cbdc145e7d4be49af 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/slab.h>
 #include <linux/wait.h>
 #include <linux/mount.h>
-#include <linux/file.h>
 #include "ecryptfs_kernel.h"
 
 struct ecryptfs_open_req {
@@ -148,7 +147,7 @@ int ecryptfs_privileged_open(struct file **lower_file,
        flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR;
        (*lower_file) = dentry_open(&req.path, flags, cred);
        if (!IS_ERR(*lower_file))
-               goto have_file;
+               goto out;
        if ((flags & O_ACCMODE) == O_RDONLY) {
                rc = PTR_ERR((*lower_file));
                goto out;
@@ -166,16 +165,8 @@ int ecryptfs_privileged_open(struct file **lower_file,
        mutex_unlock(&ecryptfs_kthread_ctl.mux);
        wake_up(&ecryptfs_kthread_ctl.wait);
        wait_for_completion(&req.done);
-       if (IS_ERR(*lower_file)) {
+       if (IS_ERR(*lower_file))
                rc = PTR_ERR(*lower_file);
-               goto out;
-       }
-have_file:
-       if ((*lower_file)->f_op->mmap == NULL) {
-               fput(*lower_file);
-               *lower_file = NULL;
-               rc = -EMEDIUMTYPE;
-       }
 out:
        return rc;
 }
index 1698132d0e576d4fea3690f56190242de33645fc..6120044951415d7840308eea25a21bd125085109 100644 (file)
@@ -738,8 +738,7 @@ static void ecryptfs_free_kmem_caches(void)
                struct ecryptfs_cache_info *info;
 
                info = &ecryptfs_cache_infos[i];
-               if (*(info->cache))
-                       kmem_cache_destroy(*(info->cache));
+               kmem_cache_destroy(*(info->cache));
        }
 }
 
index 989a2cef6b765023b670d75af4fb3bdbf6f650e6..fe7e83a45efffeb85329b194b03208e6223817e3 100644 (file)
@@ -483,9 +483,9 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
                goto out_free;
        }
        inode->i_state |= I_WB_SWITCH;
+       __iget(inode);
        spin_unlock(&inode->i_lock);
 
-       ihold(inode);
        isw->inode = inode;
 
        atomic_inc(&isw_nr_in_flight);
index 264f07c7754e270b61bed27b0e6296f19bf2f7b3..cca7b048c07b26e4919769fed461b46771005a19 100644 (file)
@@ -480,7 +480,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
        struct fuse_conn *fc = get_fuse_conn(dir);
        struct dentry *res = NULL;
 
-       if (d_unhashed(entry)) {
+       if (d_in_lookup(entry)) {
                res = fuse_lookup(dir, entry, 0);
                if (IS_ERR(res))
                        return PTR_ERR(res);
index 37b7bc14c8da578a89282bc4fab2cadd93ed441c..82df368869388ac2cc7b084620da8f53399df582 100644 (file)
@@ -140,6 +140,32 @@ static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
        return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
 }
 
+/* This is the same as calling block_write_full_page, but it also
+ * writes pages outside of i_size
+ */
+int gfs2_write_full_page(struct page *page, get_block_t *get_block,
+                        struct writeback_control *wbc)
+{
+       struct inode * const inode = page->mapping->host;
+       loff_t i_size = i_size_read(inode);
+       const pgoff_t end_index = i_size >> PAGE_SHIFT;
+       unsigned offset;
+
+       /*
+        * The page straddles i_size.  It must be zeroed out on each and every
+        * writepage invocation because it may be mmapped.  "A file is mapped
+        * in multiples of the page size.  For a file that is not a multiple of
+        * the  page size, the remaining memory is zeroed when mapped, and
+        * writes to that region are not written out to the file."
+        */
+       offset = i_size & (PAGE_SIZE-1);
+       if (page->index == end_index && offset)
+               zero_user_segment(page, offset, PAGE_SIZE);
+
+       return __block_write_full_page(inode, page, get_block, wbc,
+                                      end_buffer_async_write);
+}
+
 /**
  * __gfs2_jdata_writepage - The core of jdata writepage
  * @page: The page to write
@@ -165,7 +191,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
        }
-       return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
+       return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
 }
 
 /**
@@ -180,27 +206,20 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
 static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
 {
        struct inode *inode = page->mapping->host;
+       struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int ret;
-       int done_trans = 0;
 
-       if (PageChecked(page)) {
-               if (wbc->sync_mode != WB_SYNC_ALL)
-                       goto out_ignore;
-               ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
-               if (ret)
-                       goto out_ignore;
-               done_trans = 1;
-       }
-       ret = gfs2_writepage_common(page, wbc);
-       if (ret > 0)
-               ret = __gfs2_jdata_writepage(page, wbc);
-       if (done_trans)
-               gfs2_trans_end(sdp);
+       if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
+               goto out;
+       if (PageChecked(page) || current->journal_info)
+               goto out_ignore;
+       ret = __gfs2_jdata_writepage(page, wbc);
        return ret;
 
 out_ignore:
        redirty_page_for_writepage(wbc, page);
+out:
        unlock_page(page);
        return 0;
 }
index 30822b148f3e607b97678a2e3b1224b3f30d422f..5173b98ca0368c7c047585ac0ca2bdb3b3b91b15 100644 (file)
@@ -117,7 +117,7 @@ static int gfs2_dentry_delete(const struct dentry *dentry)
                return 0;
 
        ginode = GFS2_I(d_inode(dentry));
-       if (!ginode->i_iopen_gh.gh_gl)
+       if (!gfs2_holder_initialized(&ginode->i_iopen_gh))
                return 0;
 
        if (test_bit(GLF_DEMOTE, &ginode->i_iopen_gh.gh_gl->gl_flags))
index 271d93905bac6eed449f6d968324497f22a38301..e30cc9fb2befb3a1344ac03f9b40e6356c10fa32 100644 (file)
@@ -1663,7 +1663,8 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name,
                brelse(bh);
                if (fail_on_exist)
                        return ERR_PTR(-EEXIST);
-               inode = gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino);
+               inode = gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino,
+                                         GFS2_BLKST_FREE /* ignore */);
                if (!IS_ERR(inode))
                        GFS2_I(inode)->i_rahead = rahead;
                return inode;
index d5bda851345742b430edd71b8c3f2b8f698edf80..a332f3cd925ef86f1e0c81588fea93b030fe6449 100644 (file)
@@ -137,21 +137,10 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct inode *inode;
 
-       inode = gfs2_ilookup(sb, inum->no_addr);
-       if (inode) {
-               if (GFS2_I(inode)->i_no_formal_ino != inum->no_formal_ino) {
-                       iput(inode);
-                       return ERR_PTR(-ESTALE);
-               }
-               goto out_inode;
-       }
-
        inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
                                    GFS2_BLKST_DINODE);
        if (IS_ERR(inode))
                return ERR_CAST(inode);
-
-out_inode:
        return d_obtain_alias(inode);
 }
 
index e0f98e483aec1a1aa20ff3fe7c416cd017adf6c7..320e65e61938a5fc6d8ad387efa2505c78ebca12 100644 (file)
@@ -1098,7 +1098,7 @@ static void do_unflock(struct file *file, struct file_lock *fl)
 
        mutex_lock(&fp->f_fl_mutex);
        locks_lock_file_wait(file, fl);
-       if (fl_gh->gh_gl) {
+       if (gfs2_holder_initialized(fl_gh)) {
                gfs2_glock_dq(fl_gh);
                gfs2_holder_uninit(fl_gh);
        }
index 706fd9352f368818391ad79f4481feffe0809b60..3a90b2b5b9bb3f0ecab5e86b1f0224a64f27d661 100644 (file)
@@ -575,7 +575,6 @@ static void delete_work_func(struct work_struct *work)
 {
        struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-       struct gfs2_inode *ip;
        struct inode *inode;
        u64 no_addr = gl->gl_name.ln_number;
 
@@ -585,13 +584,7 @@ static void delete_work_func(struct work_struct *work)
        if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
                goto out;
 
-       ip = gl->gl_object;
-       /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
-
-       if (ip)
-               inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
-       else
-               inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
+       inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
        if (inode && !IS_ERR(inode)) {
                d_prune_aliases(inode);
                iput(inode);
@@ -808,7 +801,7 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
 {
        put_pid(gh->gh_owner_pid);
        gfs2_glock_put(gh->gh_gl);
-       gh->gh_gl = NULL;
+       gfs2_holder_mark_uninitialized(gh);
        gh->gh_ip = 0;
 }
 
index 46ab67fc16daa91c2fbb44e5fc64668c306745ce..ab1ef322f7a53e47794d36409097a9bd4f4264c1 100644 (file)
@@ -247,4 +247,14 @@ extern void gfs2_unregister_debugfs(void);
 
 extern const struct lm_lockops gfs2_dlm_ops;
 
+static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
+{
+       gh->gh_gl = NULL;
+}
+
+static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
+{
+       return gh->gh_gl;
+}
+
 #endif /* __GLOCK_DOT_H__ */
index 21dc784f66c2268d2e857314104847b600cc09de..e0621cacf13483807a8169d75aea74a6c103afd8 100644 (file)
 #include "super.h"
 #include "glops.h"
 
-struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
+static int iget_test(struct inode *inode, void *opaque)
 {
-       return ilookup(sb, (unsigned long)no_addr);
+       u64 no_addr = *(u64 *)opaque;
+
+       return GFS2_I(inode)->i_no_addr == no_addr;
+}
+
+static int iget_set(struct inode *inode, void *opaque)
+{
+       u64 no_addr = *(u64 *)opaque;
+
+       GFS2_I(inode)->i_no_addr = no_addr;
+       inode->i_ino = no_addr;
+       return 0;
+}
+
+static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
+{
+       struct inode *inode;
+
+repeat:
+       inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
+       if (!inode)
+               return inode;
+       if (is_bad_inode(inode)) {
+               iput(inode);
+               goto repeat;
+       }
+       return inode;
 }
 
 /**
@@ -78,26 +104,37 @@ static void gfs2_set_iop(struct inode *inode)
 /**
  * gfs2_inode_lookup - Lookup an inode
  * @sb: The super block
- * @no_addr: The inode number
  * @type: The type of the inode
+ * @no_addr: The inode number
+ * @no_formal_ino: The inode generation number
+ * @blktype: Requested block type (GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED;
+ *           GFS2_BLKST_FREE do indicate not to verify)
+ *
+ * If @type is DT_UNKNOWN, the inode type is fetched from disk.
+ *
+ * If @blktype is anything other than GFS2_BLKST_FREE (which is used as a
+ * placeholder because it doesn't otherwise make sense), the on-disk block type
+ * is verified to be @blktype.
  *
  * Returns: A VFS inode, or an error
  */
 
 struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
-                               u64 no_addr, u64 no_formal_ino)
+                               u64 no_addr, u64 no_formal_ino,
+                               unsigned int blktype)
 {
        struct inode *inode;
        struct gfs2_inode *ip;
        struct gfs2_glock *io_gl = NULL;
+       struct gfs2_holder i_gh;
        int error;
 
-       inode = iget_locked(sb, (unsigned long)no_addr);
+       gfs2_holder_mark_uninitialized(&i_gh);
+       inode = gfs2_iget(sb, no_addr);
        if (!inode)
                return ERR_PTR(-ENOMEM);
 
        ip = GFS2_I(inode);
-       ip->i_no_addr = no_addr;
 
        if (inode->i_state & I_NEW) {
                struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -112,10 +149,29 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                if (unlikely(error))
                        goto fail_put;
 
+               if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
+                       /*
+                        * The GL_SKIP flag indicates to skip reading the inode
+                        * block.  We read the inode with gfs2_inode_refresh
+                        * after possibly checking the block type.
+                        */
+                       error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
+                                                  GL_SKIP, &i_gh);
+                       if (error)
+                               goto fail_put;
+
+                       if (blktype != GFS2_BLKST_FREE) {
+                               error = gfs2_check_blk_type(sdp, no_addr,
+                                                           blktype);
+                               if (error)
+                                       goto fail_put;
+                       }
+               }
+
                set_bit(GIF_INVALID, &ip->i_flags);
                error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
                if (unlikely(error))
-                       goto fail_iopen;
+                       goto fail_put;
 
                ip->i_iopen_gh.gh_gl->gl_object = ip;
                gfs2_glock_put(io_gl);
@@ -134,6 +190,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
                unlock_new_inode(inode);
        }
 
+       if (gfs2_holder_initialized(&i_gh))
+               gfs2_glock_dq_uninit(&i_gh);
        return inode;
 
 fail_refresh:
@@ -141,10 +199,11 @@ fail_refresh:
        ip->i_iopen_gh.gh_gl->gl_object = NULL;
        gfs2_glock_dq_wait(&ip->i_iopen_gh);
        gfs2_holder_uninit(&ip->i_iopen_gh);
-fail_iopen:
+fail_put:
        if (io_gl)
                gfs2_glock_put(io_gl);
-fail_put:
+       if (gfs2_holder_initialized(&i_gh))
+               gfs2_glock_dq_uninit(&i_gh);
        ip->i_gl->gl_object = NULL;
 fail:
        iget_failed(inode);
@@ -155,23 +214,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
                                  u64 *no_formal_ino, unsigned int blktype)
 {
        struct super_block *sb = sdp->sd_vfs;
-       struct gfs2_holder i_gh;
-       struct inode *inode = NULL;
+       struct inode *inode;
        int error;
 
-       /* Must not read in block until block type is verified */
-       error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
-                                 LM_ST_EXCLUSIVE, GL_SKIP, &i_gh);
-       if (error)
-               return ERR_PTR(error);
-
-       error = gfs2_check_blk_type(sdp, no_addr, blktype);
-       if (error)
-               goto fail;
-
-       inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
+       inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, blktype);
        if (IS_ERR(inode))
-               goto fail;
+               return inode;
 
        /* Two extra checks for NFS only */
        if (no_formal_ino) {
@@ -182,16 +230,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
                error = -EIO;
                if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM)
                        goto fail_iput;
-
-               error = 0;
        }
+       return inode;
 
-fail:
-       gfs2_glock_dq_uninit(&i_gh);
-       return error ? ERR_PTR(error) : inode;
 fail_iput:
        iput(inode);
-       goto fail;
+       return ERR_PTR(error);
 }
 
 
@@ -236,8 +280,8 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
        struct gfs2_holder d_gh;
        int error = 0;
        struct inode *inode = NULL;
-       int unlock = 0;
 
+       gfs2_holder_mark_uninitialized(&d_gh);
        if (!name->len || name->len > GFS2_FNAMESIZE)
                return ERR_PTR(-ENAMETOOLONG);
 
@@ -252,7 +296,6 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
                error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
                if (error)
                        return ERR_PTR(error);
-               unlock = 1;
        }
 
        if (!is_root) {
@@ -265,7 +308,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
        if (IS_ERR(inode))
                error = PTR_ERR(inode);
 out:
-       if (unlock)
+       if (gfs2_holder_initialized(&d_gh))
                gfs2_glock_dq_uninit(&d_gh);
        if (error == -ENOENT)
                return NULL;
@@ -1189,7 +1232,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
        struct dentry *d;
        bool excl = !!(flags & O_EXCL);
 
-       if (!d_unhashed(dentry))
+       if (!d_in_lookup(dentry))
                goto skip_lookup;
 
        d = __gfs2_lookup(dir, dentry, file, opened);
@@ -1309,7 +1352,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
        struct gfs2_inode *ip = GFS2_I(d_inode(odentry));
        struct gfs2_inode *nip = NULL;
        struct gfs2_sbd *sdp = GFS2_SB(odir);
-       struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
+       struct gfs2_holder ghs[5], r_gh;
        struct gfs2_rgrpd *nrgd;
        unsigned int num_gh;
        int dir_rename = 0;
@@ -1317,6 +1360,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
        unsigned int x;
        int error;
 
+       gfs2_holder_mark_uninitialized(&r_gh);
        if (d_really_is_positive(ndentry)) {
                nip = GFS2_I(d_inode(ndentry));
                if (ip == nip)
@@ -1506,7 +1550,7 @@ out_gunlock:
                gfs2_holder_uninit(ghs + x);
        }
 out_gunlock_r:
-       if (r_gh.gh_gl)
+       if (gfs2_holder_initialized(&r_gh))
                gfs2_glock_dq_uninit(&r_gh);
 out:
        return error;
@@ -1532,13 +1576,14 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
        struct gfs2_inode *oip = GFS2_I(odentry->d_inode);
        struct gfs2_inode *nip = GFS2_I(ndentry->d_inode);
        struct gfs2_sbd *sdp = GFS2_SB(odir);
-       struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
+       struct gfs2_holder ghs[5], r_gh;
        unsigned int num_gh;
        unsigned int x;
        umode_t old_mode = oip->i_inode.i_mode;
        umode_t new_mode = nip->i_inode.i_mode;
        int error;
 
+       gfs2_holder_mark_uninitialized(&r_gh);
        error = gfs2_rindex_update(sdp);
        if (error)
                return error;
@@ -1646,7 +1691,7 @@ out_gunlock:
                gfs2_holder_uninit(ghs + x);
        }
 out_gunlock_r:
-       if (r_gh.gh_gl)
+       if (gfs2_holder_initialized(&r_gh))
                gfs2_glock_dq_uninit(&r_gh);
 out:
        return error;
@@ -1743,9 +1788,8 @@ int gfs2_permission(struct inode *inode, int mask)
        struct gfs2_inode *ip;
        struct gfs2_holder i_gh;
        int error;
-       int unlock = 0;
-
 
+       gfs2_holder_mark_uninitialized(&i_gh);
        ip = GFS2_I(inode);
        if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
                if (mask & MAY_NOT_BLOCK)
@@ -1753,14 +1797,13 @@ int gfs2_permission(struct inode *inode, int mask)
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
                if (error)
                        return error;
-               unlock = 1;
        }
 
        if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
                error = -EACCES;
        else
                error = generic_permission(inode, mask);
-       if (unlock)
+       if (gfs2_holder_initialized(&i_gh))
                gfs2_glock_dq_uninit(&i_gh);
 
        return error;
@@ -1932,17 +1975,16 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int error;
-       int unlock = 0;
 
+       gfs2_holder_mark_uninitialized(&gh);
        if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
                if (error)
                        return error;
-               unlock = 1;
        }
 
        generic_fillattr(inode, stat);
-       if (unlock)
+       if (gfs2_holder_initialized(&gh))
                gfs2_glock_dq_uninit(&gh);
 
        return 0;
index e1af0d4aa308ef83539771d0f73e5293cf557e69..7710dfd3af35023072c7cdc33bf9f5a4b5ffb78b 100644 (file)
@@ -94,11 +94,11 @@ err:
 }
 
 extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 
-                                      u64 no_addr, u64 no_formal_ino);
+                                      u64 no_addr, u64 no_formal_ino,
+                                      unsigned int blktype);
 extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
                                         u64 *no_formal_ino,
                                         unsigned int blktype);
-extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
 
 extern int gfs2_inode_refresh(struct gfs2_inode *ip);
 
index d5369a109781d990317cf634f3f8537c38a5dd7c..8e3ba20d5e9dfcf4d7d4450a3bc2c0d9a9c5db44 100644 (file)
@@ -535,9 +535,9 @@ static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
                return 0;
 
-       gfs2_replay_incr_blk(sdp, &start);
+       gfs2_replay_incr_blk(jd, &start);
 
-       for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
+       for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
 
                jd->jd_found_blocks++;
@@ -693,7 +693,7 @@ static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
 
        offset = sizeof(struct gfs2_log_descriptor);
 
-       for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
+       for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
                error = gfs2_replay_read_block(jd, start, &bh);
                if (error)
                        return error;
@@ -762,7 +762,6 @@ static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
                                    __be64 *ptr, int pass)
 {
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
-       struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        struct gfs2_glock *gl = ip->i_gl;
        unsigned int blks = be32_to_cpu(ld->ld_data1);
        struct buffer_head *bh_log, *bh_ip;
@@ -773,8 +772,8 @@ static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
        if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
                return 0;
 
-       gfs2_replay_incr_blk(sdp, &start);
-       for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
+       gfs2_replay_incr_blk(jd, &start);
+       for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
                blkno = be64_to_cpu(*ptr++);
                esc = be64_to_cpu(*ptr++);
 
index f99f8e94de3f3a65ff53839a282a160fa2ac41b2..74fd0139e6c2edf8db28af3423ff6f5680e6a0e8 100644 (file)
@@ -45,6 +45,7 @@ static void gfs2_init_inode_once(void *foo)
        memset(&ip->i_res, 0, sizeof(ip->i_res));
        RB_CLEAR_NODE(&ip->i_res.rs_node);
        ip->i_hash_cache = NULL;
+       gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
 }
 
 static void gfs2_init_glock_once(void *foo)
index 45463600fb81d34e48d328e63bf9ef389e0dd363..b8f6fc9513ef1e1aac4db567c8a4bdf003f2b30b 100644 (file)
@@ -454,7 +454,8 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
        struct dentry *dentry;
        struct inode *inode;
 
-       inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
+       inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
+                                 GFS2_BLKST_FREE /* ignore */);
        if (IS_ERR(inode)) {
                fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
                return PTR_ERR(inode);
index ce7d69a2fdc03915faf2f27b26168b35bd7527a6..6c657b202501511afa69ef64a9871339dca86bf8 100644 (file)
@@ -883,7 +883,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
        gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                              &data_blocks, &ind_blocks);
 
-       ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
+       ghs = kmalloc(num_qd * sizeof(struct gfs2_holder), GFP_NOFS);
        if (!ghs)
                return -ENOMEM;
 
index 1b645773c98e2499ff111606c5d0f6290f1ea7ca..113b6095a58dd000e2d2c23e71435a1d065fbfb9 100644 (file)
@@ -338,7 +338,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
                        struct gfs2_log_header_host lh;
                        error = get_log_header(jd, start, &lh);
                        if (!error) {
-                               gfs2_replay_incr_blk(sdp, &start);
+                               gfs2_replay_incr_blk(jd, &start);
                                brelse(bh);
                                continue;
                        }
@@ -360,7 +360,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
                }
 
                while (length--)
-                       gfs2_replay_incr_blk(sdp, &start);
+                       gfs2_replay_incr_blk(jd, &start);
 
                brelse(bh);
        }
@@ -390,7 +390,7 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
        struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 
        lblock = head->lh_blkno;
-       gfs2_replay_incr_blk(sdp, &lblock);
+       gfs2_replay_incr_blk(jd, &lblock);
        bh_map.b_size = 1 << ip->i_inode.i_blkbits;
        error = gfs2_block_map(&ip->i_inode, lblock, &bh_map, 0);
        if (error)
index 6142836cce961484acb8139d4d86683f7b8daa15..11fdfab4bf99d800af39dcbf69d726142a9af563 100644 (file)
@@ -14,9 +14,9 @@
 
 extern struct workqueue_struct *gfs_recovery_wq;
 
-static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk)
+static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, unsigned int *blk)
 {
-       if (++*blk == sdp->sd_jdesc->jd_blocks)
+       if (++*blk == jd->jd_blocks)
                *blk = 0;
 }
 
index 5bd216901e89334186d74751f62ff36ef876ff71..86ccc01593937d93b7e5901ec26ed30bd5a4ab32 100644 (file)
@@ -658,6 +658,7 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
        if (rgd) {
                spin_lock(&rgd->rd_rsspin);
                __rs_deltree(rs);
+               BUG_ON(rs->rs_free);
                spin_unlock(&rgd->rd_rsspin);
        }
 }
@@ -671,10 +672,8 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
 void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
 {
        down_write(&ip->i_rw_mutex);
-       if ((wcount == NULL) || (atomic_read(wcount) <= 1)) {
+       if ((wcount == NULL) || (atomic_read(wcount) <= 1))
                gfs2_rs_deltree(&ip->i_res);
-               BUG_ON(ip->i_res.rs_free);
-       }
        up_write(&ip->i_rw_mutex);
        gfs2_qa_delete(ip, wcount);
 }
@@ -722,6 +721,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 
                gfs2_free_clones(rgd);
                kfree(rgd->rd_bits);
+               rgd->rd_bits = NULL;
                return_all_reservations(rgd);
                kmem_cache_free(gfs2_rgrpd_cachep, rgd);
        }
@@ -916,9 +916,6 @@ static int read_rindex_entry(struct gfs2_inode *ip)
        if (error)
                goto fail;
 
-       rgd->rd_gl->gl_object = rgd;
-       rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
-       rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
        rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
        rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
        if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -926,14 +923,20 @@ static int read_rindex_entry(struct gfs2_inode *ip)
        spin_lock(&sdp->sd_rindex_spin);
        error = rgd_insert(rgd);
        spin_unlock(&sdp->sd_rindex_spin);
-       if (!error)
+       if (!error) {
+               rgd->rd_gl->gl_object = rgd;
+               rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
+               rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
+                                                   rgd->rd_length) * bsize) - 1;
                return 0;
+       }
 
        error = 0; /* someone else read in the rgrp; free it and ignore it */
        gfs2_glock_put(rgd->rd_gl);
 
 fail:
        kfree(rgd->rd_bits);
+       rgd->rd_bits = NULL;
        kmem_cache_free(gfs2_rgrpd_cachep, rgd);
        return error;
 }
@@ -2096,7 +2099,7 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
 {
        struct gfs2_blkreserv *rs = &ip->i_res;
 
-       if (rs->rs_rgd_gh.gh_gl)
+       if (gfs2_holder_initialized(&rs->rs_rgd_gh))
                gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
 }
 
@@ -2596,7 +2599,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 {
        unsigned int x;
 
-       rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
+       rlist->rl_ghs = kmalloc(rlist->rl_rgrps * sizeof(struct gfs2_holder),
                                GFP_NOFS | __GFP_NOFAIL);
        for (x = 0; x < rlist->rl_rgrps; x++)
                gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
index 9b2ff353e45f6e5317788536c1523f0f8d1c3f68..3a7e60bb39f8f60a66b3178b860449f901af553e 100644 (file)
@@ -855,7 +855,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
        wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
 
-       if (freeze_gh.gh_gl)
+       if (gfs2_holder_initialized(&freeze_gh))
                gfs2_glock_dq_uninit(&freeze_gh);
 
        gfs2_quota_cleanup(sdp);
@@ -1033,7 +1033,7 @@ static int gfs2_unfreeze(struct super_block *sb)
 
        mutex_lock(&sdp->sd_freeze_mutex);
         if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
-           sdp->sd_freeze_gh.gh_gl == NULL) {
+           !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
                mutex_unlock(&sdp->sd_freeze_mutex);
                 return 0;
        }
@@ -1084,9 +1084,11 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
        int error = 0, err;
 
        memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
-       gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
+       gha = kmalloc(slots * sizeof(struct gfs2_holder), GFP_KERNEL);
        if (!gha)
                return -ENOMEM;
+       for (x = 0; x < slots; x++)
+               gfs2_holder_mark_uninitialized(gha + x);
 
        rgd_next = gfs2_rgrpd_get_first(sdp);
 
@@ -1096,7 +1098,7 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
                for (x = 0; x < slots; x++) {
                        gh = gha + x;
 
-                       if (gh->gh_gl && gfs2_glock_poll(gh)) {
+                       if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
                                err = gfs2_glock_wait(gh);
                                if (err) {
                                        gfs2_holder_uninit(gh);
@@ -1109,7 +1111,7 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
                                }
                        }
 
-                       if (gh->gh_gl)
+                       if (gfs2_holder_initialized(gh))
                                done = 0;
                        else if (rgd_next && !error) {
                                error = gfs2_glock_nq_init(rgd_next->rd_gl,
@@ -1304,9 +1306,11 @@ static int gfs2_drop_inode(struct inode *inode)
 {
        struct gfs2_inode *ip = GFS2_I(inode);
 
-       if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) && inode->i_nlink) {
+       if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
+           inode->i_nlink &&
+           gfs2_holder_initialized(&ip->i_iopen_gh)) {
                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
-               if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
+               if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        clear_nlink(inode);
        }
        return generic_drop_inode(inode);
@@ -1551,7 +1555,7 @@ static void gfs2_evict_inode(struct inode *inode)
                        goto out_truncate;
        }
 
-       if (ip->i_iopen_gh.gh_gl &&
+       if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
            test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
                ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_wait(&ip->i_iopen_gh);
@@ -1610,7 +1614,7 @@ out_unlock:
        if (gfs2_rs_active(&ip->i_res))
                gfs2_rs_deltree(&ip->i_res);
 
-       if (ip->i_iopen_gh.gh_gl) {
+       if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
                if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
                        ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                        gfs2_glock_dq_wait(&ip->i_iopen_gh);
@@ -1632,7 +1636,7 @@ out:
        gfs2_glock_add_to_lru(ip->i_gl);
        gfs2_glock_put(ip->i_gl);
        ip->i_gl = NULL;
-       if (ip->i_iopen_gh.gh_gl) {
+       if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
                ip->i_iopen_gh.gh_gl->gl_object = NULL;
                ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_wait(&ip->i_iopen_gh);
index d8015a03db4c40c114abd6e2afeab732a7eb565f..19d93d0cd400f5ac175a9d257ec8a8ced0e04c21 100644 (file)
@@ -1485,11 +1485,13 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                    struct file *file, unsigned open_flags,
                    umode_t mode, int *opened)
 {
+       DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        struct nfs_open_context *ctx;
        struct dentry *res;
        struct iattr attr = { .ia_valid = ATTR_OPEN };
        struct inode *inode;
        unsigned int lookup_flags = 0;
+       bool switched = false;
        int err;
 
        /* Expect a negative dentry */
@@ -1504,7 +1506,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
 
        /* NFS only supports OPEN on regular files */
        if ((open_flags & O_DIRECTORY)) {
-               if (!d_unhashed(dentry)) {
+               if (!d_in_lookup(dentry)) {
                        /*
                         * Hashed negative dentry with O_DIRECTORY: dentry was
                         * revalidated and is fine, no need to perform lookup
@@ -1528,6 +1530,17 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
                attr.ia_size = 0;
        }
 
+       if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) {
+               d_drop(dentry);
+               switched = true;
+               dentry = d_alloc_parallel(dentry->d_parent,
+                                         &dentry->d_name, &wq);
+               if (IS_ERR(dentry))
+                       return PTR_ERR(dentry);
+               if (unlikely(!d_in_lookup(dentry)))
+                       return finish_no_open(file, dentry);
+       }
+
        ctx = create_nfs_open_context(dentry, open_flags);
        err = PTR_ERR(ctx);
        if (IS_ERR(ctx))
@@ -1563,14 +1576,23 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
        trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
        put_nfs_open_context(ctx);
 out:
+       if (unlikely(switched)) {
+               d_lookup_done(dentry);
+               dput(dentry);
+       }
        return err;
 
 no_open:
        res = nfs_lookup(dir, dentry, lookup_flags);
-       err = PTR_ERR(res);
+       if (switched) {
+               d_lookup_done(dentry);
+               if (!res)
+                       res = dentry;
+               else
+                       dput(dentry);
+       }
        if (IS_ERR(res))
-               goto out;
-
+               return PTR_ERR(res);
        return finish_no_open(file, res);
 }
 EXPORT_SYMBOL_GPL(nfs_atomic_open);
index c2a6b08940228c838375ed9ef37c548c5052a2cc..5c9d2d80ff70bf851e835c97adf775bbac963641 100644 (file)
@@ -505,6 +505,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        struct dentry *upper;
        struct dentry *opaquedir = NULL;
        int err;
+       int flags = 0;
 
        if (WARN_ON(!workdir))
                return -EROFS;
@@ -534,46 +535,39 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        if (err)
                goto out_dput;
 
-       whiteout = ovl_whiteout(workdir, dentry);
-       err = PTR_ERR(whiteout);
-       if (IS_ERR(whiteout))
+       upper = lookup_one_len(dentry->d_name.name, upperdir,
+                              dentry->d_name.len);
+       err = PTR_ERR(upper);
+       if (IS_ERR(upper))
                goto out_unlock;
 
-       upper = ovl_dentry_upper(dentry);
-       if (!upper) {
-               upper = lookup_one_len(dentry->d_name.name, upperdir,
-                                      dentry->d_name.len);
-               err = PTR_ERR(upper);
-               if (IS_ERR(upper))
-                       goto kill_whiteout;
-
-               err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
-               dput(upper);
-               if (err)
-                       goto kill_whiteout;
-       } else {
-               int flags = 0;
+       err = -ESTALE;
+       if ((opaquedir && upper != opaquedir) ||
+           (!opaquedir && ovl_dentry_upper(dentry) &&
+            upper != ovl_dentry_upper(dentry))) {
+               goto out_dput_upper;
+       }
 
-               if (opaquedir)
-                       upper = opaquedir;
-               err = -ESTALE;
-               if (upper->d_parent != upperdir)
-                       goto kill_whiteout;
+       whiteout = ovl_whiteout(workdir, dentry);
+       err = PTR_ERR(whiteout);
+       if (IS_ERR(whiteout))
+               goto out_dput_upper;
 
-               if (is_dir)
-                       flags |= RENAME_EXCHANGE;
+       if (d_is_dir(upper))
+               flags = RENAME_EXCHANGE;
 
-               err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
-               if (err)
-                       goto kill_whiteout;
+       err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
+       if (err)
+               goto kill_whiteout;
+       if (flags)
+               ovl_cleanup(wdir, upper);
 
-               if (is_dir)
-                       ovl_cleanup(wdir, upper);
-       }
        ovl_dentry_version_inc(dentry->d_parent);
 out_d_drop:
        d_drop(dentry);
        dput(whiteout);
+out_dput_upper:
+       dput(upper);
 out_unlock:
        unlock_rename(workdir, upperdir);
 out_dput:
index c831c2e5f803f26d553264b3a2fc2abbe38803e9..d1cdc60dd68fa25aa74e2474a09e07aab08b7e26 100644 (file)
@@ -80,6 +80,9 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
                                goto out_drop_write;
                }
 
+               if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+                       attr->ia_valid &= ~ATTR_MODE;
+
                inode_lock(upperdentry->d_inode);
                err = notify_change(upperdentry, attr, NULL);
                if (!err)
@@ -410,12 +413,11 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
        if (!inode)
                return NULL;
 
-       mode &= S_IFMT;
-
        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_flags |= S_NOATIME | S_NOCMTIME;
 
+       mode &= S_IFMT;
        switch (mode) {
        case S_IFDIR:
                inode->i_private = oe;
index 4bd9b5ba8f42008b7f72cf271fac89c67b7eca6d..cfbca53590d078b89ca613e65c3b10486763f656 100644 (file)
@@ -187,6 +187,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
 {
        to->i_uid = from->i_uid;
        to->i_gid = from->i_gid;
+       to->i_mode = from->i_mode;
 }
 
 /* dir.c */
index 933b53a375b4cd872c1e9e8e314ea57a13837980..66215a7b17cf14d0b776dbb8e62b310a1571fdeb 100644 (file)
@@ -1168,6 +1168,15 @@ COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
        return do_compat_preadv64(fd, vec, vlen, pos, 0);
 }
 
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
+COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
+               const struct compat_iovec __user *,vec,
+               unsigned long, vlen, loff_t, pos, int, flags)
+{
+       return do_compat_preadv64(fd, vec, vlen, pos, flags);
+}
+#endif
+
 COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd,
                const struct compat_iovec __user *,vec,
                compat_ulong_t, vlen, u32, pos_low, u32, pos_high,
@@ -1265,6 +1274,15 @@ COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
        return do_compat_pwritev64(fd, vec, vlen, pos, 0);
 }
 
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
+               const struct compat_iovec __user *,vec,
+               unsigned long, vlen, loff_t, pos, int, flags)
+{
+       return do_compat_pwritev64(fd, vec, vlen, pos, flags);
+}
+#endif
+
 COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd,
                const struct compat_iovec __user *,vec,
                compat_ulong_t, vlen, u32, pos_low, u32, pos_high, int, flags)
index 053818dd6c18be8f228e1c40483e50d78aa78007..9ae4abb4110b84ef286facc88e66be9d954a021d 100644 (file)
@@ -390,6 +390,11 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
             clockid != CLOCK_BOOTTIME_ALARM))
                return -EINVAL;
 
+       if (!capable(CAP_WAKE_ALARM) &&
+           (clockid == CLOCK_REALTIME_ALARM ||
+            clockid == CLOCK_BOOTTIME_ALARM))
+               return -EPERM;
+
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
@@ -433,6 +438,11 @@ static int do_timerfd_settime(int ufd, int flags,
                return ret;
        ctx = f.file->private_data;
 
+       if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) {
+               fdput(f);
+               return -EPERM;
+       }
+
        timerfd_setup_cancel(ctx, flags);
 
        /*
index dbca7375deefa3f7d2499f516697ffdd28178546..63a6ff2cfc6821961265136e1f62cf455da3b195 100644 (file)
@@ -1575,6 +1575,12 @@ xfs_ioc_swapext(
                goto out_put_tmp_file;
        }
 
+       if (f.file->f_op != &xfs_file_operations ||
+           tmp.file->f_op != &xfs_file_operations) {
+               error = -EINVAL;
+               goto out_put_tmp_file;
+       }
+
        ip = XFS_I(file_inode(f.file));
        tip = XFS_I(file_inode(tmp.file));
 
index 797ae2ec8eee2d129653514cea7b891db96e38d6..29c691265b49357bc0d036b71897348806c58e6c 100644 (file)
@@ -78,6 +78,7 @@
 
 /* ACPI PCI Interrupt Link (pci_link.c) */
 
+int acpi_irq_penalty_init(void);
 int acpi_pci_link_allocate_irq(acpi_handle handle, int index, int *triggering,
                               int *polarity, char **name);
 int acpi_pci_link_free_irq(acpi_handle handle);
index 4e4c21491c4188a4401887feb55978fe58b1ff47..1ff3a76c265dbcbaea2f427359bb16c75e719200 100644 (file)
@@ -192,7 +192,7 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_do_not_use_xsdt, FALSE);
 /*
  * Optionally support group module level code.
  */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_group_module_level_code, TRUE);
 
 /*
  * Optionally use 32-bit FADT addresses if and when there is a conflict
index 5e1f345b58ddf3529b074bab33d62bdefcdae5da..288cc9e963953ae182e6b8760663d7b712e834c6 100644 (file)
@@ -112,6 +112,62 @@ static __always_inline void atomic_long_dec(atomic_long_t *l)
        ATOMIC_LONG_PFX(_dec)(v);
 }
 
+#define ATOMIC_LONG_FETCH_OP(op, mo)                                   \
+static inline long                                                     \
+atomic_long_fetch_##op##mo(long i, atomic_long_t *l)                   \
+{                                                                      \
+       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
+                                                                       \
+       return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v);            \
+}
+
+ATOMIC_LONG_FETCH_OP(add, )
+ATOMIC_LONG_FETCH_OP(add, _relaxed)
+ATOMIC_LONG_FETCH_OP(add, _acquire)
+ATOMIC_LONG_FETCH_OP(add, _release)
+ATOMIC_LONG_FETCH_OP(sub, )
+ATOMIC_LONG_FETCH_OP(sub, _relaxed)
+ATOMIC_LONG_FETCH_OP(sub, _acquire)
+ATOMIC_LONG_FETCH_OP(sub, _release)
+ATOMIC_LONG_FETCH_OP(and, )
+ATOMIC_LONG_FETCH_OP(and, _relaxed)
+ATOMIC_LONG_FETCH_OP(and, _acquire)
+ATOMIC_LONG_FETCH_OP(and, _release)
+ATOMIC_LONG_FETCH_OP(andnot, )
+ATOMIC_LONG_FETCH_OP(andnot, _relaxed)
+ATOMIC_LONG_FETCH_OP(andnot, _acquire)
+ATOMIC_LONG_FETCH_OP(andnot, _release)
+ATOMIC_LONG_FETCH_OP(or, )
+ATOMIC_LONG_FETCH_OP(or, _relaxed)
+ATOMIC_LONG_FETCH_OP(or, _acquire)
+ATOMIC_LONG_FETCH_OP(or, _release)
+ATOMIC_LONG_FETCH_OP(xor, )
+ATOMIC_LONG_FETCH_OP(xor, _relaxed)
+ATOMIC_LONG_FETCH_OP(xor, _acquire)
+ATOMIC_LONG_FETCH_OP(xor, _release)
+
+#undef ATOMIC_LONG_FETCH_OP
+
+#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo)                                   \
+static inline long                                                     \
+atomic_long_fetch_##op##mo(atomic_long_t *l)                           \
+{                                                                      \
+       ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;              \
+                                                                       \
+       return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v);               \
+}
+
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc,)
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed)
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire)
+ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec,)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire)
+ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release)
+
+#undef ATOMIC_LONG_FETCH_INC_DEC_OP
+
 #define ATOMIC_LONG_OP(op)                                             \
 static __always_inline void                                            \
 atomic_long_##op(long i, atomic_long_t *l)                             \
@@ -124,9 +180,9 @@ atomic_long_##op(long i, atomic_long_t *l)                          \
 ATOMIC_LONG_OP(add)
 ATOMIC_LONG_OP(sub)
 ATOMIC_LONG_OP(and)
+ATOMIC_LONG_OP(andnot)
 ATOMIC_LONG_OP(or)
 ATOMIC_LONG_OP(xor)
-ATOMIC_LONG_OP(andnot)
 
 #undef ATOMIC_LONG_OP
 
index 74f1a3704d7a1ddf61707b6da03fc2348521e4a1..9ed8b987185b45b1157993abf4b0fe5a6c0b23a8 100644 (file)
@@ -61,6 +61,18 @@ static inline int atomic_##op##_return(int i, atomic_t *v)           \
        return c c_op i;                                                \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op)                                      \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       int c, old;                                                     \
+                                                                       \
+       c = v->counter;                                                 \
+       while ((old = cmpxchg(&v->counter, c, c c_op i)) != c)          \
+               c = old;                                                \
+                                                                       \
+       return c;                                                       \
+}
+
 #else
 
 #include <linux/irqflags.h>
@@ -88,6 +100,20 @@ static inline int atomic_##op##_return(int i, atomic_t *v)          \
        return ret;                                                     \
 }
 
+#define ATOMIC_FETCH_OP(op, c_op)                                      \
+static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
+{                                                                      \
+       unsigned long flags;                                            \
+       int ret;                                                        \
+                                                                       \
+       raw_local_irq_save(flags);                                      \
+       ret = v->counter;                                               \
+       v->counter = v->counter c_op i;                                 \
+       raw_local_irq_restore(flags);                                   \
+                                                                       \
+       return ret;                                                     \
+}
+
 #endif /* CONFIG_SMP */
 
 #ifndef atomic_add_return
@@ -98,6 +124,26 @@ ATOMIC_OP_RETURN(add, +)
 ATOMIC_OP_RETURN(sub, -)
 #endif
 
+#ifndef atomic_fetch_add
+ATOMIC_FETCH_OP(add, +)
+#endif
+
+#ifndef atomic_fetch_sub
+ATOMIC_FETCH_OP(sub, -)
+#endif
+
+#ifndef atomic_fetch_and
+ATOMIC_FETCH_OP(and, &)
+#endif
+
+#ifndef atomic_fetch_or
+ATOMIC_FETCH_OP(or, |)
+#endif
+
+#ifndef atomic_fetch_xor
+ATOMIC_FETCH_OP(xor, ^)
+#endif
+
 #ifndef atomic_and
 ATOMIC_OP(and, &)
 #endif
@@ -110,6 +156,7 @@ ATOMIC_OP(or, |)
 ATOMIC_OP(xor, ^)
 #endif
 
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP
 
index d48e78ccad3dd8aedb3b020d46bdb00312c86772..dad68bf46c77012e144857898093b99dabea1cd7 100644 (file)
@@ -27,16 +27,23 @@ extern void  atomic64_##op(long long a, atomic64_t *v);
 #define ATOMIC64_OP_RETURN(op)                                         \
 extern long long atomic64_##op##_return(long long a, atomic64_t *v);
 
-#define ATOMIC64_OPS(op)       ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+#define ATOMIC64_FETCH_OP(op)                                          \
+extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
+
+#define ATOMIC64_OPS(op)       ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
 
 ATOMIC64_OPS(add)
 ATOMIC64_OPS(sub)
 
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op)       ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op)
+
+ATOMIC64_OPS(and)
+ATOMIC64_OPS(or)
+ATOMIC64_OPS(xor)
 
 #undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
index 1cceca146905013dbe2c346020ad73fe3df68ce5..fe297b599b0a1caa8696b8b7c5a5ed1ddab16556 100644 (file)
@@ -194,7 +194,7 @@ do {                                                                        \
 })
 #endif
 
-#endif
+#endif /* CONFIG_SMP */
 
 /* Barriers for virtual machine guests when talking to an SMP host */
 #define virt_mb() __smp_mb()
@@ -207,5 +207,44 @@ do {                                                                       \
 #define virt_store_release(p, v) __smp_store_release(p, v)
 #define virt_load_acquire(p) __smp_load_acquire(p)
 
+/**
+ * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
+ *
+ * A control dependency provides a LOAD->STORE order, the additional RMB
+ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
+ * aka. (load)-ACQUIRE.
+ *
+ * Architectures that do not do load speculation can have this be barrier().
+ */
+#ifndef smp_acquire__after_ctrl_dep
+#define smp_acquire__after_ctrl_dep()          smp_rmb()
+#endif
+
+/**
+ * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ *
+ * Due to C lacking lambda expressions we load the value of *ptr into a
+ * pre-named variable @VAL to be used in @cond.
+ */
+#ifndef smp_cond_load_acquire
+#define smp_cond_load_acquire(ptr, cond_expr) ({               \
+       typeof(ptr) __PTR = (ptr);                              \
+       typeof(*ptr) VAL;                                       \
+       for (;;) {                                              \
+               VAL = READ_ONCE(*__PTR);                        \
+               if (cond_expr)                                  \
+                       break;                                  \
+               cpu_relax();                                    \
+       }                                                       \
+       smp_acquire__after_ctrl_dep();                          \
+       VAL;                                                    \
+})
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __ASM_GENERIC_BARRIER_H */
index 0f1c6f315cdc5294ca63bac49efbf69a5dfee805..a84e28e0c6341031a7a3026f25993efdd833ffe7 100644 (file)
@@ -50,6 +50,8 @@ typedef u64 __nocast cputime64_t;
        (__force u64)(__ct)
 #define nsecs_to_cputime(__nsecs)      \
        (__force cputime_t)(__nsecs)
+#define nsecs_to_cputime64(__nsecs)    \
+       (__force cputime64_t)(__nsecs)
 
 
 /*
index fd694cfd678af712b2c3ca1055331df24a8db8ba..c54829d3de3700ce6df65494b30f3dd1b32f63fe 100644 (file)
@@ -80,7 +80,7 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-       if (likely(atomic_cmpxchg_acquire(count, 1, 0) == 1))
+       if (likely(atomic_read(count) == 1 && atomic_cmpxchg_acquire(count, 1, 0) == 1))
                return 1;
        return 0;
 }
index a6b4a7bd6ac9770356e066c51f295c6b9c33793f..3269ec4e195fbaba5b44fa61b74896ff285ac989 100644 (file)
@@ -91,8 +91,12 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
 static inline int
 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
 {
-       int prev = atomic_xchg_acquire(count, 0);
+       int prev;
 
+       if (atomic_read(count) != 1)
+               return 0;
+
+       prev = atomic_xchg_acquire(count, 0);
        if (unlikely(prev < 0)) {
                /*
                 * The lock was marked contended so we must restore that
index 05f05f17a7c2e4fc14f8fa858abc9f7d7715c7b9..9f0681bf1e87d6ec0b2a224168fccc944286f29e 100644 (file)
@@ -111,10 +111,9 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 {
        /*
-        * smp_mb__before_atomic() in order to guarantee release semantics
+        * unlock() needs release semantics:
         */
-       smp_mb__before_atomic();
-       atomic_sub(_Q_LOCKED_VAL, &lock->val);
+       (void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
 }
 #endif
 
index 3fc94a046bf584374b15070f68074c64a3daaf14..5be122e3d32605ad9e0f809b23ba5349d5118de4 100644 (file)
@@ -41,8 +41,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
        long tmp;
 
-       while ((tmp = sem->count) >= 0) {
-               if (tmp == cmpxchg_acquire(&sem->count, tmp,
+       while ((tmp = atomic_long_read(&sem->count)) >= 0) {
+               if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
                                   tmp + RWSEM_ACTIVE_READ_BIAS)) {
                        return 1;
                }
@@ -79,7 +79,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
        long tmp;
 
-       tmp = cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
+       tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
                      RWSEM_ACTIVE_WRITE_BIAS);
        return tmp == RWSEM_UNLOCKED_VALUE;
 }
@@ -106,14 +106,6 @@ static inline void __up_write(struct rw_semaphore *sem)
                rwsem_wake(sem);
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-       atomic_long_add(delta, (atomic_long_t *)&sem->count);
-}
-
 /*
  * downgrade write lock to read lock
  */
@@ -134,13 +126,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                rwsem_downgrade_wake(sem);
 }
 
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-       return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_GENERIC_RWSEM_H */
index 6a67ab94b553363934bc9c2e07eb12d6c8a977f7..081d0f258d4c98d3cb36615c2dd252b630052b99 100644 (file)
 
 #define INIT_TEXT                                                      \
        *(.init.text)                                                   \
+       *(.text.startup)                                                \
        MEM_DISCARD(init.text)
 
 #define EXIT_DATA                                                      \
        *(.exit.data)                                                   \
+       *(.fini_array)                                                  \
+       *(.dtors)                                                       \
        MEM_DISCARD(exit.data)                                          \
        MEM_DISCARD(exit.rodata)
 
 #define EXIT_TEXT                                                      \
        *(.exit.text)                                                   \
+       *(.text.exit)                                                   \
        MEM_DISCARD(exit.text)
 
 #define EXIT_CALL                                                      \
index 1f8a1caa7cb4d4b6d9900a34aed168c3011930ae..7654d71243dd7104dc9f1d07647020500d5481b7 100644 (file)
@@ -3,10 +3,10 @@
 
 struct clk;
 
-void __sp804_clocksource_and_sched_clock_init(void __iomem *,
-                                             const char *, struct clk *, int);
-void __sp804_clockevents_init(void __iomem *, unsigned int,
-                             struct clk *, const char *);
+int __sp804_clocksource_and_sched_clock_init(void __iomem *,
+                                            const char *, struct clk *, int);
+int __sp804_clockevents_init(void __iomem *, unsigned int,
+                            struct clk *, const char *);
 void sp804_timer_disable(void __iomem *);
 
 static inline void sp804_clocksource_init(void __iomem *base, const char *name)
index c801d9028e37388bc30a8254be8c4ae7df6acb22..4cecb0b75b9cb702ba2189f77b1ebab78d5bd6ab 100644 (file)
@@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
  */
 extern int ttm_bo_wait(struct ttm_buffer_object *bo,
                       bool interruptible, bool no_wait);
+
+/**
+ * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
+ *
+ * @placement:  The struct ttm_placement describing the proposed placement
+ * @mem:  The struct ttm_mem_reg indicating the region where the bo resides
+ * @new_flags: Describes compatible placement found
+ *
+ * Returns true if the placement is compatible
+ */
+extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
+                             struct ttm_mem_reg *mem,
+                             uint32_t *new_flags);
+
 /**
  * ttm_bo_validate
  *
index 52f3b7da4f2d4dbb181aa1fe5c8d73e4df8f1814..9d8031257a90d8a371b27b608f8e8fbf532bf974 100644 (file)
@@ -26,10 +26,10 @@ enum alarmtimer_restart {
  * struct alarm - Alarm timer structure
  * @node:      timerqueue node for adding to the event list this value
  *             also includes the expiration time.
- * @period:    Period for recuring alarms
+ * @timer:     hrtimer used to schedule events while running
  * @function:  Function pointer to be executed when the timer fires.
- * @type:      Alarm type (BOOTTIME/REALTIME)
- * @enabled:   Flag that represents if the alarm is set to fire or not
+ * @type:      Alarm type (BOOTTIME/REALTIME).
+ * @state:     Flag that represents if the alarm is set to fire or not.
  * @data:      Internal data value.
  */
 struct alarm {
index e451534fe54d9184fbdaab4c8684c1e5a288ff39..e71835bf60a977a37277d44c6357a02e70c3a41d 100644 (file)
 #endif
 #endif /* atomic_dec_return_relaxed */
 
-/* atomic_xchg_relaxed */
-#ifndef atomic_xchg_relaxed
-#define  atomic_xchg_relaxed           atomic_xchg
-#define  atomic_xchg_acquire           atomic_xchg
-#define  atomic_xchg_release           atomic_xchg
 
-#else /* atomic_xchg_relaxed */
+/* atomic_fetch_add_relaxed */
+#ifndef atomic_fetch_add_relaxed
+#define atomic_fetch_add_relaxed       atomic_fetch_add
+#define atomic_fetch_add_acquire       atomic_fetch_add
+#define atomic_fetch_add_release       atomic_fetch_add
 
-#ifndef atomic_xchg_acquire
-#define  atomic_xchg_acquire(...)                                      \
-       __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
+#else /* atomic_fetch_add_relaxed */
+
+#ifndef atomic_fetch_add_acquire
+#define atomic_fetch_add_acquire(...)                                  \
+       __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__)
 #endif
 
-#ifndef atomic_xchg_release
-#define  atomic_xchg_release(...)                                      \
-       __atomic_op_release(atomic_xchg, __VA_ARGS__)
+#ifndef atomic_fetch_add_release
+#define atomic_fetch_add_release(...)                                  \
+       __atomic_op_release(atomic_fetch_add, __VA_ARGS__)
 #endif
 
-#ifndef atomic_xchg
-#define  atomic_xchg(...)                                              \
-       __atomic_op_fence(atomic_xchg, __VA_ARGS__)
+#ifndef atomic_fetch_add
+#define atomic_fetch_add(...)                                          \
+       __atomic_op_fence(atomic_fetch_add, __VA_ARGS__)
+#endif
+#endif /* atomic_fetch_add_relaxed */
+
+/* atomic_fetch_inc_relaxed */
+#ifndef atomic_fetch_inc_relaxed
+
+#ifndef atomic_fetch_inc
+#define atomic_fetch_inc(v)            atomic_fetch_add(1, (v))
+#define atomic_fetch_inc_relaxed(v)    atomic_fetch_add_relaxed(1, (v))
+#define atomic_fetch_inc_acquire(v)    atomic_fetch_add_acquire(1, (v))
+#define atomic_fetch_inc_release(v)    atomic_fetch_add_release(1, (v))
+#else /* atomic_fetch_inc */
+#define atomic_fetch_inc_relaxed       atomic_fetch_inc
+#define atomic_fetch_inc_acquire       atomic_fetch_inc
+#define atomic_fetch_inc_release       atomic_fetch_inc
+#endif /* atomic_fetch_inc */
+
+#else /* atomic_fetch_inc_relaxed */
+
+#ifndef atomic_fetch_inc_acquire
+#define atomic_fetch_inc_acquire(...)                                  \
+       __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__)
 #endif
-#endif /* atomic_xchg_relaxed */
 
-/* atomic_cmpxchg_relaxed */
-#ifndef atomic_cmpxchg_relaxed
-#define  atomic_cmpxchg_relaxed                atomic_cmpxchg
-#define  atomic_cmpxchg_acquire                atomic_cmpxchg
-#define  atomic_cmpxchg_release                atomic_cmpxchg
+#ifndef atomic_fetch_inc_release
+#define atomic_fetch_inc_release(...)                                  \
+       __atomic_op_release(atomic_fetch_inc, __VA_ARGS__)
+#endif
 
-#else /* atomic_cmpxchg_relaxed */
+#ifndef atomic_fetch_inc
+#define atomic_fetch_inc(...)                                          \
+       __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__)
+#endif
+#endif /* atomic_fetch_inc_relaxed */
 
-#ifndef atomic_cmpxchg_acquire
-#define  atomic_cmpxchg_acquire(...)                                   \
-       __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
+/* atomic_fetch_sub_relaxed */
+#ifndef atomic_fetch_sub_relaxed
+#define atomic_fetch_sub_relaxed       atomic_fetch_sub
+#define atomic_fetch_sub_acquire       atomic_fetch_sub
+#define atomic_fetch_sub_release       atomic_fetch_sub
+
+#else /* atomic_fetch_sub_relaxed */
+
+#ifndef atomic_fetch_sub_acquire
+#define atomic_fetch_sub_acquire(...)                                  \
+       __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__)
 #endif
 
-#ifndef atomic_cmpxchg_release
-#define  atomic_cmpxchg_release(...)                                   \
-       __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
+#ifndef atomic_fetch_sub_release
+#define atomic_fetch_sub_release(...)                                  \
+       __atomic_op_release(atomic_fetch_sub, __VA_ARGS__)
 #endif
 
-#ifndef atomic_cmpxchg
-#define  atomic_cmpxchg(...)                                           \
-       __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
+#ifndef atomic_fetch_sub
+#define atomic_fetch_sub(...)                                          \
+       __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__)
+#endif
+#endif /* atomic_fetch_sub_relaxed */
+
+/* atomic_fetch_dec_relaxed */
+#ifndef atomic_fetch_dec_relaxed
+
+#ifndef atomic_fetch_dec
+#define atomic_fetch_dec(v)            atomic_fetch_sub(1, (v))
+#define atomic_fetch_dec_relaxed(v)    atomic_fetch_sub_relaxed(1, (v))
+#define atomic_fetch_dec_acquire(v)    atomic_fetch_sub_acquire(1, (v))
+#define atomic_fetch_dec_release(v)    atomic_fetch_sub_release(1, (v))
+#else /* atomic_fetch_dec */
+#define atomic_fetch_dec_relaxed       atomic_fetch_dec
+#define atomic_fetch_dec_acquire       atomic_fetch_dec
+#define atomic_fetch_dec_release       atomic_fetch_dec
+#endif /* atomic_fetch_dec */
+
+#else /* atomic_fetch_dec_relaxed */
+
+#ifndef atomic_fetch_dec_acquire
+#define atomic_fetch_dec_acquire(...)                                  \
+       __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__)
 #endif
-#endif /* atomic_cmpxchg_relaxed */
 
-#ifndef atomic64_read_acquire
-#define  atomic64_read_acquire(v)      smp_load_acquire(&(v)->counter)
+#ifndef atomic_fetch_dec_release
+#define atomic_fetch_dec_release(...)                                  \
+       __atomic_op_release(atomic_fetch_dec, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_set_release
-#define  atomic64_set_release(v, i)    smp_store_release(&(v)->counter, (i))
+#ifndef atomic_fetch_dec
+#define atomic_fetch_dec(...)                                          \
+       __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__)
 #endif
+#endif /* atomic_fetch_dec_relaxed */
 
-/* atomic64_add_return_relaxed */
-#ifndef atomic64_add_return_relaxed
-#define  atomic64_add_return_relaxed   atomic64_add_return
-#define  atomic64_add_return_acquire   atomic64_add_return
-#define  atomic64_add_return_release   atomic64_add_return
+/* atomic_fetch_or_relaxed */
+#ifndef atomic_fetch_or_relaxed
+#define atomic_fetch_or_relaxed        atomic_fetch_or
+#define atomic_fetch_or_acquire        atomic_fetch_or
+#define atomic_fetch_or_release        atomic_fetch_or
 
-#else /* atomic64_add_return_relaxed */
+#else /* atomic_fetch_or_relaxed */
 
-#ifndef atomic64_add_return_acquire
-#define  atomic64_add_return_acquire(...)                              \
-       __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#ifndef atomic_fetch_or_acquire
+#define atomic_fetch_or_acquire(...)                                   \
+       __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_add_return_release
-#define  atomic64_add_return_release(...)                              \
-       __atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#ifndef atomic_fetch_or_release
+#define atomic_fetch_or_release(...)                                   \
+       __atomic_op_release(atomic_fetch_or, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_add_return
-#define  atomic64_add_return(...)                                      \
-       __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#ifndef atomic_fetch_or
+#define atomic_fetch_or(...)                                           \
+       __atomic_op_fence(atomic_fetch_or, __VA_ARGS__)
 #endif
-#endif /* atomic64_add_return_relaxed */
+#endif /* atomic_fetch_or_relaxed */
 
-/* atomic64_inc_return_relaxed */
-#ifndef atomic64_inc_return_relaxed
-#define  atomic64_inc_return_relaxed   atomic64_inc_return
-#define  atomic64_inc_return_acquire   atomic64_inc_return
-#define  atomic64_inc_return_release   atomic64_inc_return
+/* atomic_fetch_and_relaxed */
+#ifndef atomic_fetch_and_relaxed
+#define atomic_fetch_and_relaxed       atomic_fetch_and
+#define atomic_fetch_and_acquire       atomic_fetch_and
+#define atomic_fetch_and_release       atomic_fetch_and
 
-#else /* atomic64_inc_return_relaxed */
+#else /* atomic_fetch_and_relaxed */
 
-#ifndef atomic64_inc_return_acquire
-#define  atomic64_inc_return_acquire(...)                              \
-       __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
+#ifndef atomic_fetch_and_acquire
+#define atomic_fetch_and_acquire(...)                                  \
+       __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_inc_return_release
-#define  atomic64_inc_return_release(...)                              \
-       __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
+#ifndef atomic_fetch_and_release
+#define atomic_fetch_and_release(...)                                  \
+       __atomic_op_release(atomic_fetch_and, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_inc_return
-#define  atomic64_inc_return(...)                                      \
-       __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
+#ifndef atomic_fetch_and
+#define atomic_fetch_and(...)                                          \
+       __atomic_op_fence(atomic_fetch_and, __VA_ARGS__)
 #endif
-#endif /* atomic64_inc_return_relaxed */
-
+#endif /* atomic_fetch_and_relaxed */
 
-/* atomic64_sub_return_relaxed */
-#ifndef atomic64_sub_return_relaxed
-#define  atomic64_sub_return_relaxed   atomic64_sub_return
-#define  atomic64_sub_return_acquire   atomic64_sub_return
-#define  atomic64_sub_return_release   atomic64_sub_return
+#ifdef atomic_andnot
+/* atomic_fetch_andnot_relaxed */
+#ifndef atomic_fetch_andnot_relaxed
+#define atomic_fetch_andnot_relaxed    atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire    atomic_fetch_andnot
+#define atomic_fetch_andnot_release    atomic_fetch_andnot
 
-#else /* atomic64_sub_return_relaxed */
+#else /* atomic_fetch_andnot_relaxed */
 
-#ifndef atomic64_sub_return_acquire
-#define  atomic64_sub_return_acquire(...)                              \
-       __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
+#ifndef atomic_fetch_andnot_acquire
+#define atomic_fetch_andnot_acquire(...)                                       \
+       __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_sub_return_release
-#define  atomic64_sub_return_release(...)                              \
-       __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#ifndef atomic_fetch_andnot_release
+#define atomic_fetch_andnot_release(...)                                       \
+       __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_sub_return
-#define  atomic64_sub_return(...)                                      \
-       __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#ifndef atomic_fetch_andnot
+#define atomic_fetch_andnot(...)                                               \
+       __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
 #endif
-#endif /* atomic64_sub_return_relaxed */
+#endif /* atomic_fetch_andnot_relaxed */
+#endif /* atomic_andnot */
 
-/* atomic64_dec_return_relaxed */
-#ifndef atomic64_dec_return_relaxed
-#define  atomic64_dec_return_relaxed   atomic64_dec_return
-#define  atomic64_dec_return_acquire   atomic64_dec_return
-#define  atomic64_dec_return_release   atomic64_dec_return
+/* atomic_fetch_xor_relaxed */
+#ifndef atomic_fetch_xor_relaxed
+#define atomic_fetch_xor_relaxed       atomic_fetch_xor
+#define atomic_fetch_xor_acquire       atomic_fetch_xor
+#define atomic_fetch_xor_release       atomic_fetch_xor
 
-#else /* atomic64_dec_return_relaxed */
+#else /* atomic_fetch_xor_relaxed */
 
-#ifndef atomic64_dec_return_acquire
-#define  atomic64_dec_return_acquire(...)                              \
-       __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
+#ifndef atomic_fetch_xor_acquire
+#define atomic_fetch_xor_acquire(...)                                  \
+       __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_dec_return_release
-#define  atomic64_dec_return_release(...)                              \
-       __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
+#ifndef atomic_fetch_xor_release
+#define atomic_fetch_xor_release(...)                                  \
+       __atomic_op_release(atomic_fetch_xor, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_dec_return
-#define  atomic64_dec_return(...)                                      \
-       __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
+#ifndef atomic_fetch_xor
+#define atomic_fetch_xor(...)                                          \
+       __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__)
 #endif
-#endif /* atomic64_dec_return_relaxed */
+#endif /* atomic_fetch_xor_relaxed */
 
-/* atomic64_xchg_relaxed */
-#ifndef atomic64_xchg_relaxed
-#define  atomic64_xchg_relaxed         atomic64_xchg
-#define  atomic64_xchg_acquire         atomic64_xchg
-#define  atomic64_xchg_release         atomic64_xchg
 
-#else /* atomic64_xchg_relaxed */
+/* atomic_xchg_relaxed */
+#ifndef atomic_xchg_relaxed
+#define  atomic_xchg_relaxed           atomic_xchg
+#define  atomic_xchg_acquire           atomic_xchg
+#define  atomic_xchg_release           atomic_xchg
 
-#ifndef atomic64_xchg_acquire
-#define  atomic64_xchg_acquire(...)                                    \
-       __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#else /* atomic_xchg_relaxed */
+
+#ifndef atomic_xchg_acquire
+#define  atomic_xchg_acquire(...)                                      \
+       __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_xchg_release
-#define  atomic64_xchg_release(...)                                    \
-       __atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#ifndef atomic_xchg_release
+#define  atomic_xchg_release(...)                                      \
+       __atomic_op_release(atomic_xchg, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_xchg
-#define  atomic64_xchg(...)                                            \
-       __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#ifndef atomic_xchg
+#define  atomic_xchg(...)                                              \
+       __atomic_op_fence(atomic_xchg, __VA_ARGS__)
 #endif
-#endif /* atomic64_xchg_relaxed */
+#endif /* atomic_xchg_relaxed */
 
-/* atomic64_cmpxchg_relaxed */
-#ifndef atomic64_cmpxchg_relaxed
-#define  atomic64_cmpxchg_relaxed      atomic64_cmpxchg
-#define  atomic64_cmpxchg_acquire      atomic64_cmpxchg
-#define  atomic64_cmpxchg_release      atomic64_cmpxchg
+/* atomic_cmpxchg_relaxed */
+#ifndef atomic_cmpxchg_relaxed
+#define  atomic_cmpxchg_relaxed                atomic_cmpxchg
+#define  atomic_cmpxchg_acquire                atomic_cmpxchg
+#define  atomic_cmpxchg_release                atomic_cmpxchg
 
-#else /* atomic64_cmpxchg_relaxed */
+#else /* atomic_cmpxchg_relaxed */
 
-#ifndef atomic64_cmpxchg_acquire
-#define  atomic64_cmpxchg_acquire(...)                                 \
-       __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#ifndef atomic_cmpxchg_acquire
+#define  atomic_cmpxchg_acquire(...)                                   \
+       __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_cmpxchg_release
-#define  atomic64_cmpxchg_release(...)                                 \
-       __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#ifndef atomic_cmpxchg_release
+#define  atomic_cmpxchg_release(...)                                   \
+       __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
 #endif
 
-#ifndef atomic64_cmpxchg
-#define  atomic64_cmpxchg(...)                                         \
-       __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#ifndef atomic_cmpxchg
+#define  atomic_cmpxchg(...)                                           \
+       __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
 #endif
-#endif /* atomic64_cmpxchg_relaxed */
+#endif /* atomic_cmpxchg_relaxed */
 
 /* cmpxchg_relaxed */
 #ifndef cmpxchg_relaxed
@@ -463,18 +522,28 @@ static inline void atomic_andnot(int i, atomic_t *v)
 {
        atomic_and(~i, v);
 }
-#endif
 
-static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
+static inline int atomic_fetch_andnot(int i, atomic_t *v)
 {
-       atomic_andnot(mask, v);
+       return atomic_fetch_and(~i, v);
 }
 
-static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
+static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
 {
-       atomic_or(mask, v);
+       return atomic_fetch_and_relaxed(~i, v);
 }
 
+static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+       return atomic_fetch_and_acquire(~i, v);
+}
+
+static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+       return atomic_fetch_and_release(~i, v);
+}
+#endif
+
 /**
  * atomic_inc_not_zero_hint - increment if not null
  * @v: pointer of type atomic_t
@@ -558,36 +627,400 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 }
 #endif
 
-/**
- * atomic_fetch_or - perform *p |= mask and return old value of *p
- * @mask: mask to OR on the atomic_t
- * @p: pointer to atomic_t
- */
-#ifndef atomic_fetch_or
-static inline int atomic_fetch_or(int mask, atomic_t *p)
-{
-       int old, val = atomic_read(p);
+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
 
-       for (;;) {
-               old = atomic_cmpxchg(p, val, val | mask);
-               if (old == val)
-                       break;
-               val = old;
-       }
+#ifndef atomic64_read_acquire
+#define  atomic64_read_acquire(v)      smp_load_acquire(&(v)->counter)
+#endif
 
-       return old;
-}
+#ifndef atomic64_set_release
+#define  atomic64_set_release(v, i)    smp_store_release(&(v)->counter, (i))
 #endif
 
-#ifdef CONFIG_GENERIC_ATOMIC64
-#include <asm-generic/atomic64.h>
+/* atomic64_add_return_relaxed */
+#ifndef atomic64_add_return_relaxed
+#define  atomic64_add_return_relaxed   atomic64_add_return
+#define  atomic64_add_return_acquire   atomic64_add_return
+#define  atomic64_add_return_release   atomic64_add_return
+
+#else /* atomic64_add_return_relaxed */
+
+#ifndef atomic64_add_return_acquire
+#define  atomic64_add_return_acquire(...)                              \
+       __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return_release
+#define  atomic64_add_return_release(...)                              \
+       __atomic_op_release(atomic64_add_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_add_return
+#define  atomic64_add_return(...)                                      \
+       __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_add_return_relaxed */
+
+/* atomic64_inc_return_relaxed */
+#ifndef atomic64_inc_return_relaxed
+#define  atomic64_inc_return_relaxed   atomic64_inc_return
+#define  atomic64_inc_return_acquire   atomic64_inc_return
+#define  atomic64_inc_return_release   atomic64_inc_return
+
+#else /* atomic64_inc_return_relaxed */
+
+#ifndef atomic64_inc_return_acquire
+#define  atomic64_inc_return_acquire(...)                              \
+       __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return_release
+#define  atomic64_inc_return_release(...)                              \
+       __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_inc_return
+#define  atomic64_inc_return(...)                                      \
+       __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_inc_return_relaxed */
+
+
+/* atomic64_sub_return_relaxed */
+#ifndef atomic64_sub_return_relaxed
+#define  atomic64_sub_return_relaxed   atomic64_sub_return
+#define  atomic64_sub_return_acquire   atomic64_sub_return
+#define  atomic64_sub_return_release   atomic64_sub_return
+
+#else /* atomic64_sub_return_relaxed */
+
+#ifndef atomic64_sub_return_acquire
+#define  atomic64_sub_return_acquire(...)                              \
+       __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
 #endif
 
+#ifndef atomic64_sub_return_release
+#define  atomic64_sub_return_release(...)                              \
+       __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_sub_return
+#define  atomic64_sub_return(...)                                      \
+       __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_sub_return_relaxed */
+
+/* atomic64_dec_return_relaxed */
+#ifndef atomic64_dec_return_relaxed
+#define  atomic64_dec_return_relaxed   atomic64_dec_return
+#define  atomic64_dec_return_acquire   atomic64_dec_return
+#define  atomic64_dec_return_release   atomic64_dec_return
+
+#else /* atomic64_dec_return_relaxed */
+
+#ifndef atomic64_dec_return_acquire
+#define  atomic64_dec_return_acquire(...)                              \
+       __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return_release
+#define  atomic64_dec_return_release(...)                              \
+       __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_dec_return
+#define  atomic64_dec_return(...)                                      \
+       __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
+#endif
+#endif /* atomic64_dec_return_relaxed */
+
+
+/* atomic64_fetch_add_relaxed */
+#ifndef atomic64_fetch_add_relaxed
+#define atomic64_fetch_add_relaxed     atomic64_fetch_add
+#define atomic64_fetch_add_acquire     atomic64_fetch_add
+#define atomic64_fetch_add_release     atomic64_fetch_add
+
+#else /* atomic64_fetch_add_relaxed */
+
+#ifndef atomic64_fetch_add_acquire
+#define atomic64_fetch_add_acquire(...)                                        \
+       __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_add_release
+#define atomic64_fetch_add_release(...)                                        \
+       __atomic_op_release(atomic64_fetch_add, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_add
+#define atomic64_fetch_add(...)                                                \
+       __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_add_relaxed */
+
+/* atomic64_fetch_inc_relaxed */
+#ifndef atomic64_fetch_inc_relaxed
+
+#ifndef atomic64_fetch_inc
+#define atomic64_fetch_inc(v)          atomic64_fetch_add(1, (v))
+#define atomic64_fetch_inc_relaxed(v)  atomic64_fetch_add_relaxed(1, (v))
+#define atomic64_fetch_inc_acquire(v)  atomic64_fetch_add_acquire(1, (v))
+#define atomic64_fetch_inc_release(v)  atomic64_fetch_add_release(1, (v))
+#else /* atomic64_fetch_inc */
+#define atomic64_fetch_inc_relaxed     atomic64_fetch_inc
+#define atomic64_fetch_inc_acquire     atomic64_fetch_inc
+#define atomic64_fetch_inc_release     atomic64_fetch_inc
+#endif /* atomic64_fetch_inc */
+
+#else /* atomic64_fetch_inc_relaxed */
+
+#ifndef atomic64_fetch_inc_acquire
+#define atomic64_fetch_inc_acquire(...)                                        \
+       __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_inc_release
+#define atomic64_fetch_inc_release(...)                                        \
+       __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_inc
+#define atomic64_fetch_inc(...)                                                \
+       __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_inc_relaxed */
+
+/* atomic64_fetch_sub_relaxed */
+#ifndef atomic64_fetch_sub_relaxed
+#define atomic64_fetch_sub_relaxed     atomic64_fetch_sub
+#define atomic64_fetch_sub_acquire     atomic64_fetch_sub
+#define atomic64_fetch_sub_release     atomic64_fetch_sub
+
+#else /* atomic64_fetch_sub_relaxed */
+
+#ifndef atomic64_fetch_sub_acquire
+#define atomic64_fetch_sub_acquire(...)                                        \
+       __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_sub_release
+#define atomic64_fetch_sub_release(...)                                        \
+       __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_sub
+#define atomic64_fetch_sub(...)                                                \
+       __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_sub_relaxed */
+
+/* atomic64_fetch_dec_relaxed */
+#ifndef atomic64_fetch_dec_relaxed
+
+#ifndef atomic64_fetch_dec
+#define atomic64_fetch_dec(v)          atomic64_fetch_sub(1, (v))
+#define atomic64_fetch_dec_relaxed(v)  atomic64_fetch_sub_relaxed(1, (v))
+#define atomic64_fetch_dec_acquire(v)  atomic64_fetch_sub_acquire(1, (v))
+#define atomic64_fetch_dec_release(v)  atomic64_fetch_sub_release(1, (v))
+#else /* atomic64_fetch_dec */
+#define atomic64_fetch_dec_relaxed     atomic64_fetch_dec
+#define atomic64_fetch_dec_acquire     atomic64_fetch_dec
+#define atomic64_fetch_dec_release     atomic64_fetch_dec
+#endif /* atomic64_fetch_dec */
+
+#else /* atomic64_fetch_dec_relaxed */
+
+#ifndef atomic64_fetch_dec_acquire
+#define atomic64_fetch_dec_acquire(...)                                        \
+       __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_dec_release
+#define atomic64_fetch_dec_release(...)                                        \
+       __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_dec
+#define atomic64_fetch_dec(...)                                                \
+       __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_dec_relaxed */
+
+/* atomic64_fetch_or_relaxed */
+#ifndef atomic64_fetch_or_relaxed
+#define atomic64_fetch_or_relaxed      atomic64_fetch_or
+#define atomic64_fetch_or_acquire      atomic64_fetch_or
+#define atomic64_fetch_or_release      atomic64_fetch_or
+
+#else /* atomic64_fetch_or_relaxed */
+
+#ifndef atomic64_fetch_or_acquire
+#define atomic64_fetch_or_acquire(...)                                 \
+       __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_or_release
+#define atomic64_fetch_or_release(...)                                 \
+       __atomic_op_release(atomic64_fetch_or, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_or
+#define atomic64_fetch_or(...)                                         \
+       __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_or_relaxed */
+
+/* atomic64_fetch_and_relaxed */
+#ifndef atomic64_fetch_and_relaxed
+#define atomic64_fetch_and_relaxed     atomic64_fetch_and
+#define atomic64_fetch_and_acquire     atomic64_fetch_and
+#define atomic64_fetch_and_release     atomic64_fetch_and
+
+#else /* atomic64_fetch_and_relaxed */
+
+#ifndef atomic64_fetch_and_acquire
+#define atomic64_fetch_and_acquire(...)                                        \
+       __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_and_release
+#define atomic64_fetch_and_release(...)                                        \
+       __atomic_op_release(atomic64_fetch_and, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_and
+#define atomic64_fetch_and(...)                                                \
+       __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_and_relaxed */
+
+#ifdef atomic64_andnot
+/* atomic64_fetch_andnot_relaxed */
+#ifndef atomic64_fetch_andnot_relaxed
+#define atomic64_fetch_andnot_relaxed  atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire  atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release  atomic64_fetch_andnot
+
+#else /* atomic64_fetch_andnot_relaxed */
+
+#ifndef atomic64_fetch_andnot_acquire
+#define atomic64_fetch_andnot_acquire(...)                                     \
+       __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_andnot_release
+#define atomic64_fetch_andnot_release(...)                                     \
+       __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_andnot
+#define atomic64_fetch_andnot(...)                                             \
+       __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_andnot_relaxed */
+#endif /* atomic64_andnot */
+
+/* atomic64_fetch_xor_relaxed */
+#ifndef atomic64_fetch_xor_relaxed
+#define atomic64_fetch_xor_relaxed     atomic64_fetch_xor
+#define atomic64_fetch_xor_acquire     atomic64_fetch_xor
+#define atomic64_fetch_xor_release     atomic64_fetch_xor
+
+#else /* atomic64_fetch_xor_relaxed */
+
+#ifndef atomic64_fetch_xor_acquire
+#define atomic64_fetch_xor_acquire(...)                                        \
+       __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_xor_release
+#define atomic64_fetch_xor_release(...)                                        \
+       __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_fetch_xor
+#define atomic64_fetch_xor(...)                                                \
+       __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__)
+#endif
+#endif /* atomic64_fetch_xor_relaxed */
+
+
+/* atomic64_xchg_relaxed */
+#ifndef atomic64_xchg_relaxed
+#define  atomic64_xchg_relaxed         atomic64_xchg
+#define  atomic64_xchg_acquire         atomic64_xchg
+#define  atomic64_xchg_release         atomic64_xchg
+
+#else /* atomic64_xchg_relaxed */
+
+#ifndef atomic64_xchg_acquire
+#define  atomic64_xchg_acquire(...)                                    \
+       __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg_release
+#define  atomic64_xchg_release(...)                                    \
+       __atomic_op_release(atomic64_xchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_xchg
+#define  atomic64_xchg(...)                                            \
+       __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_xchg_relaxed */
+
+/* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_cmpxchg_relaxed
+#define  atomic64_cmpxchg_relaxed      atomic64_cmpxchg
+#define  atomic64_cmpxchg_acquire      atomic64_cmpxchg
+#define  atomic64_cmpxchg_release      atomic64_cmpxchg
+
+#else /* atomic64_cmpxchg_relaxed */
+
+#ifndef atomic64_cmpxchg_acquire
+#define  atomic64_cmpxchg_acquire(...)                                 \
+       __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg_release
+#define  atomic64_cmpxchg_release(...)                                 \
+       __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+
+#ifndef atomic64_cmpxchg
+#define  atomic64_cmpxchg(...)                                         \
+       __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
+#endif
+#endif /* atomic64_cmpxchg_relaxed */
+
 #ifndef atomic64_andnot
 static inline void atomic64_andnot(long long i, atomic64_t *v)
 {
        atomic64_and(~i, v);
 }
+
+static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
+{
+       return atomic64_fetch_and(~i, v);
+}
+
+static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
+{
+       return atomic64_fetch_and_relaxed(~i, v);
+}
+
+static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
+{
+       return atomic64_fetch_and_acquire(~i, v);
+}
+
+static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
+{
+       return atomic64_fetch_and_release(~i, v);
+}
 #endif
 
 #include <asm-generic/atomic-long.h>
index e6b41f42602ba7cc32e2945e15309d06f3028f95..3db25df396cbb88d2135874818f4fb38a71b2eee 100644 (file)
@@ -159,6 +159,7 @@ struct bcma_host_ops {
 #define BCMA_CORE_DEFAULT              0xFFF
 
 #define BCMA_MAX_NR_CORES              16
+#define BCMA_CORE_SIZE                 0x1000
 
 /* Chip IDs of PCIe devices */
 #define BCMA_CHIP_ID_BCM4313   0x4313
index d48daa3f6f20f7b7d3da0c5396168a277a239d14..7e14e545c4b6aa24218f680f978ae7818d725ee1 100644 (file)
@@ -208,6 +208,9 @@ void block_invalidatepage(struct page *page, unsigned int offset,
                          unsigned int length);
 int block_write_full_page(struct page *page, get_block_t *get_block,
                                struct writeback_control *wbc);
+int __block_write_full_page(struct inode *inode, struct page *page,
+                       get_block_t *get_block, struct writeback_control *wbc,
+                       bh_end_io_t *handler);
 int block_read_full_page(struct page*, get_block_t*);
 int block_is_partially_uptodate(struct page *page, unsigned long from,
                                unsigned long count);
index 0df4a51e1a78d2bd74ac899c655bce9659ea50da..834179f3fa72a4508509f0a3991385f63cfed417 100644 (file)
@@ -461,6 +461,10 @@ static inline struct clk *clk_get_parent(struct clk *clk)
        return NULL;
 }
 
+static inline struct clk *clk_get_sys(const char *dev_id, const char *con_id)
+{
+       return NULL;
+}
 #endif
 
 /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */
index 44a1aff225666108c91a8d9f61081315b75d0617..08398182f56ecae83ba24fc3be1707a729e0760d 100644 (file)
@@ -244,7 +244,7 @@ extern int clocksource_mmio_init(void __iomem *, const char *,
 extern int clocksource_i8253_init(void);
 
 #define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
-       OF_DECLARE_1(clksrc, name, compat, fn)
+       OF_DECLARE_1_RET(clksrc, name, compat, fn)
 
 #ifdef CONFIG_CLKSRC_PROBE
 extern void clocksource_probe(void);
index 793c0829e3a3909dd532c972e513c7b8164a16ed..2e853b679a5da9c4061053b714801d7434728154 100644 (file)
@@ -304,23 +304,6 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
        __u.__val;                                      \
 })
 
-/**
- * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering
- * @cond: boolean expression to wait for
- *
- * Equivalent to using smp_load_acquire() on the condition variable but employs
- * the control dependency of the wait to reduce the barrier on many platforms.
- *
- * The control dependency provides a LOAD->STORE order, the additional RMB
- * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
- * aka. ACQUIRE.
- */
-#define smp_cond_acquire(cond) do {            \
-       while (!(cond))                         \
-               cpu_relax();                    \
-       smp_rmb(); /* ctrl + rmb := acquire */  \
-} while (0)
-
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
@@ -545,10 +528,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * Similar to rcu_dereference(), but for situations where the pointed-to
  * object's lifetime is managed by something other than RCU.  That
  * "something other" might be reference counting or simple immortality.
+ *
+ * The seemingly unused void * variable is to validate @p is indeed a pointer
+ * type. All pointer types silently cast to void *.
  */
 #define lockless_dereference(p) \
 ({ \
        typeof(p) _________p1 = READ_ONCE(p); \
+       __maybe_unused const void * const _________p2 = _________p1; \
        smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
 })
index 98c8615dc300cb843766a157deadca248364d686..d530c4627e54ef1091be820a78d3616166ca8906 100644 (file)
@@ -28,6 +28,13 @@ struct tty_struct;
 #define VT100ID "\033[?1;2c"
 #define VT102ID "\033[?6c"
 
+/**
+ * struct consw - callbacks for consoles
+ *
+ * @con_set_palette: sets the palette of the console to @table (optional)
+ * @con_scrolldelta: the contents of the console should be scrolled by @lines.
+ *                  Invoked by user. (optional)
+ */
 struct consw {
        struct module *owner;
        const char *(*con_startup)(void);
@@ -38,7 +45,6 @@ struct consw {
        void    (*con_putcs)(struct vc_data *, const unsigned short *, int, int, int);
        void    (*con_cursor)(struct vc_data *, int);
        int     (*con_scroll)(struct vc_data *, int, int, int, int);
-       void    (*con_bmove)(struct vc_data *, int, int, int, int, int, int);
        int     (*con_switch)(struct vc_data *);
        int     (*con_blank)(struct vc_data *, int, int);
        int     (*con_font_set)(struct vc_data *, struct console_font *, unsigned);
@@ -47,8 +53,9 @@ struct consw {
        int     (*con_font_copy)(struct vc_data *, int);
        int     (*con_resize)(struct vc_data *, unsigned int, unsigned int,
                               unsigned int);
-       int     (*con_set_palette)(struct vc_data *, const unsigned char *);
-       int     (*con_scrolldelta)(struct vc_data *, int);
+       void    (*con_set_palette)(struct vc_data *,
+                       const unsigned char *table);
+       void    (*con_scrolldelta)(struct vc_data *, int lines);
        int     (*con_set_origin)(struct vc_data *);
        void    (*con_save_screen)(struct vc_data *);
        u8      (*con_build_attr)(struct vc_data *, u8, u8, u8, u8, u8, u8);
index e329ee2667e1954298cf6c152ba37833d1cc2e0d..6fd3c908a340d234aee8ec8c8a99e0612f9c8a91 100644 (file)
@@ -21,6 +21,38 @@ struct uni_pagedir;
 
 #define NPAR 16
 
+/*
+ * Example: vc_data of a console that was scrolled 3 lines down.
+ *
+ *                              Console buffer
+ * vc_screenbuf ---------> +----------------------+-.
+ *                         | initializing W       |  \
+ *                         | initializing X       |   |
+ *                         | initializing Y       |    > scroll-back area
+ *                         | initializing Z       |   |
+ *                         |                      |  /
+ * vc_visible_origin ---> ^+----------------------+-:
+ * (changes by scroll)    || Welcome to linux     |  \
+ *                        ||                      |   |
+ *           vc_rows --->< | login: root          |   |  visible on console
+ *                        || password:            |    > (vc_screenbuf_size is
+ * vc_origin -----------> ||                      |   |   vc_size_row * vc_rows)
+ * (start when no scroll) || Last login: 12:28    |  /
+ *                        v+----------------------+-:
+ *                         | Have a lot of fun... |  \
+ * vc_pos -----------------|--------v             |   > scroll-front area
+ *                         | ~ # cat_             |  /
+ * vc_scr_end -----------> +----------------------+-:
+ * (vc_origin +            |                      |  \ EMPTY, to be filled by
+ *  vc_screenbuf_size)     |                      |  / vc_video_erase_char
+ *                         +----------------------+-'
+ *                         <---- 2 * vc_cols ----->
+ *                         <---- vc_size_row ----->
+ *
+ * Note that every character in the console buffer is accompanied with an
+ * attribute in the buffer right after the character. This is not depicted
+ * in the figure.
+ */
 struct vc_data {
        struct tty_port port;                   /* Upper level data */
 
@@ -74,7 +106,6 @@ struct vc_data {
        unsigned int    vc_decawm       : 1;    /* Autowrap Mode */
        unsigned int    vc_deccm        : 1;    /* Cursor Visible */
        unsigned int    vc_decim        : 1;    /* Insert Mode */
-       unsigned int    vc_deccolm      : 1;    /* 80/132 Column Mode */
        /* attribute flags */
        unsigned int    vc_intensity    : 2;    /* 0=half-bright, 1=normal, 2=bold */
        unsigned int    vc_italic:1;
@@ -136,6 +167,9 @@ extern void vc_SAK(struct work_struct *work);
 
 #define CUR_DEFAULT CUR_UNDERLINE
 
-#define CON_IS_VISIBLE(conp) (*conp->vc_display_fg == conp)
+static inline bool con_is_visible(const struct vc_data *vc)
+{
+       return *vc->vc_display_fg == vc;
+}
 
 #endif /* _LINUX_CONSOLE_STRUCT_H */
index d259274238db361a5b8d41116d03fc4e5642feda..d9aef2a0ec8eb48eb38a891bf263b90cfd43d502 100644 (file)
@@ -31,6 +31,19 @@ static inline void user_exit(void)
                context_tracking_exit(CONTEXT_USER);
 }
 
+/* Called with interrupts disabled.  */
+static inline void user_enter_irqoff(void)
+{
+       if (context_tracking_is_enabled())
+               __context_tracking_enter(CONTEXT_USER);
+
+}
+static inline void user_exit_irqoff(void)
+{
+       if (context_tracking_is_enabled())
+               __context_tracking_exit(CONTEXT_USER);
+}
+
 static inline enum ctx_state exception_enter(void)
 {
        enum ctx_state prev_ctx;
@@ -69,6 +82,8 @@ static inline enum ctx_state ct_state(void)
 #else
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
+static inline void user_enter_irqoff(void) { }
+static inline void user_exit_irqoff(void) { }
 static inline enum ctx_state exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
 static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
index 79df69dc629cd61a918ffe185f2fccf9e56ea393..aaff68efba5dab20f33dfcbb102469e4f564cc33 100644 (file)
@@ -39,14 +39,22 @@ struct hsu_dma_chip {
 
 #if IS_ENABLED(CONFIG_HSU_DMA)
 /* Export to the internal users */
-irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr);
+int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr,
+                      u32 *status);
+irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr,
+                          u32 status);
 
 /* Export to the platform drivers */
 int hsu_dma_probe(struct hsu_dma_chip *chip);
 int hsu_dma_remove(struct hsu_dma_chip *chip);
 #else
-static inline irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip,
-                                     unsigned short nr)
+static inline int hsu_dma_get_status(struct hsu_dma_chip *chip,
+                                    unsigned short nr, u32 *status)
+{
+       return 0;
+}
+static inline irqreturn_t hsu_dma_do_irq(struct hsu_dma_chip *chip,
+                                        unsigned short nr, u32 status)
 {
        return IRQ_NONE;
 }
index f196dd0b0f2f6c241715807f63bdccaa30973ce8..7f80a75ee9e36cfab646465863d5b270673e3aff 100644 (file)
@@ -536,116 +536,58 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
 void efi_native_runtime_setup(void);
 
 /*
- *  EFI Configuration Table and GUID definitions
+ * EFI Configuration Table and GUID definitions
+ *
+ * These are all defined in a single line to make them easier to
+ * grep for and to see them at a glance - while still having a
+ * similar structure to the definitions in the spec.
+ *
+ * Here's how they are structured:
+ *
+ * GUID: 12345678-1234-1234-1234-123456789012
+ * Spec:
+ *      #define EFI_SOME_PROTOCOL_GUID \
+ *        {0x12345678,0x1234,0x1234,\
+ *          {0x12,0x34,0x12,0x34,0x56,0x78,0x90,0x12}}
+ * Here:
+ *     #define SOME_PROTOCOL_GUID              EFI_GUID(0x12345678, 0x1234, 0x1234,  0x12, 0x34, 0x12, 0x34, 0x56, 0x78, 0x90, 0x12)
+ *                                     ^ tabs                                      ^extra space
+ *
+ * Note that the 'extra space' separates the values at the same place
+ * where the UEFI SPEC breaks the line.
  */
-#define NULL_GUID \
-       EFI_GUID(0x00000000, 0x0000, 0x0000, \
-                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
-
-#define MPS_TABLE_GUID    \
-       EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, \
-                0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-
-#define ACPI_TABLE_GUID    \
-       EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, \
-                0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-
-#define ACPI_20_TABLE_GUID    \
-       EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, \
-                0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
-
-#define SMBIOS_TABLE_GUID    \
-       EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, \
-                0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-
-#define SMBIOS3_TABLE_GUID    \
-       EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, \
-                0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
-
-#define SAL_SYSTEM_TABLE_GUID    \
-       EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, \
-                0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-
-#define HCDP_TABLE_GUID        \
-       EFI_GUID(0xf951938d, 0x620b, 0x42ef, \
-                0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
-
-#define UGA_IO_PROTOCOL_GUID \
-       EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, \
-                0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
-
-#define EFI_GLOBAL_VARIABLE_GUID \
-       EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, \
-                0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
-
-#define UV_SYSTEM_TABLE_GUID \
-       EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, \
-                0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
-
-#define LINUX_EFI_CRASH_GUID \
-       EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc, \
-                0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
-
-#define LOADED_IMAGE_PROTOCOL_GUID \
-       EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2, \
-                0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
-
-#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID \
-       EFI_GUID(0x9042a9de, 0x23dc, 0x4a38, \
-                0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
-
-#define EFI_UGA_PROTOCOL_GUID \
-       EFI_GUID(0x982c298b, 0xf4fa, 0x41cb, \
-                0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
-
-#define EFI_PCI_IO_PROTOCOL_GUID \
-       EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5, \
-                0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
-
-#define EFI_FILE_INFO_ID \
-       EFI_GUID(0x9576e92, 0x6d3f, 0x11d2, \
-                0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
-
-#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
-       EFI_GUID(0xb122a263, 0x3661, 0x4f68, \
-                0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
-
-#define EFI_FILE_SYSTEM_GUID \
-       EFI_GUID(0x964e5b22, 0x6459, 0x11d2, \
-                0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
-
-#define DEVICE_TREE_GUID \
-       EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, \
-                0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
-
-#define EFI_PROPERTIES_TABLE_GUID \
-       EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, \
-                0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
-
-#define EFI_RNG_PROTOCOL_GUID \
-       EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
-                0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
-
-#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \
-       EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \
-                0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
-
-#define EFI_CONSOLE_OUT_DEVICE_GUID \
-       EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \
-                0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define NULL_GUID                              EFI_GUID(0x00000000, 0x0000, 0x0000,  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
+#define MPS_TABLE_GUID                         EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3,  0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define ACPI_TABLE_GUID                                EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3,  0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define ACPI_20_TABLE_GUID                     EFI_GUID(0x8868e871, 0xe4f1, 0x11d3,  0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
+#define SMBIOS_TABLE_GUID                      EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3,  0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define SMBIOS3_TABLE_GUID                     EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c,  0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
+#define SAL_SYSTEM_TABLE_GUID                  EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3,  0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+#define HCDP_TABLE_GUID                                EFI_GUID(0xf951938d, 0x620b, 0x42ef,  0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
+#define UGA_IO_PROTOCOL_GUID                   EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b,  0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
+#define EFI_GLOBAL_VARIABLE_GUID               EFI_GUID(0x8be4df61, 0x93ca, 0x11d2,  0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
+#define UV_SYSTEM_TABLE_GUID                   EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd,  0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
+#define LINUX_EFI_CRASH_GUID                   EFI_GUID(0xcfc8fc79, 0xbe2e, 0x4ddc,  0x97, 0xf0, 0x9f, 0x98, 0xbf, 0xe2, 0x98, 0xa0)
+#define LOADED_IMAGE_PROTOCOL_GUID             EFI_GUID(0x5b1b31a1, 0x9562, 0x11d2,  0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID      EFI_GUID(0x9042a9de, 0x23dc, 0x4a38,  0x96, 0xfb, 0x7a, 0xde, 0xd0, 0x80, 0x51, 0x6a)
+#define EFI_UGA_PROTOCOL_GUID                  EFI_GUID(0x982c298b, 0xf4fa, 0x41cb,  0xb8, 0x38, 0x77, 0xaa, 0x68, 0x8f, 0xb8, 0x39)
+#define EFI_PCI_IO_PROTOCOL_GUID               EFI_GUID(0x4cf5b200, 0x68b8, 0x4ca5,  0x9e, 0xec, 0xb2, 0x3e, 0x3f, 0x50, 0x02, 0x9a)
+#define EFI_FILE_INFO_ID                       EFI_GUID(0x09576e92, 0x6d3f, 0x11d2,  0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_SYSTEM_RESOURCE_TABLE_GUID         EFI_GUID(0xb122a263, 0x3661, 0x4f68,  0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80)
+#define EFI_FILE_SYSTEM_GUID                   EFI_GUID(0x964e5b22, 0x6459, 0x11d2,  0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define DEVICE_TREE_GUID                       EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5,  0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0)
+#define EFI_PROPERTIES_TABLE_GUID              EFI_GUID(0x880aaca3, 0x4adc, 0x4a04,  0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5)
+#define EFI_RNG_PROTOCOL_GUID                  EFI_GUID(0x3152bca5, 0xeade, 0x433d,  0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
+#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID       EFI_GUID(0xdcfa911d, 0x26eb, 0x469f,  0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
+#define EFI_CONSOLE_OUT_DEVICE_GUID            EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4,  0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
 
 /*
  * This GUID is used to pass to the kernel proper the struct screen_info
  * structure that was populated by the stub based on the GOP protocol instance
  * associated with ConOut
  */
-#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID \
-       EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, \
-                0xb9, 0xe, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
-
-#define LINUX_EFI_LOADER_ENTRY_GUID \
-       EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, \
-                0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
+#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID   EFI_GUID(0xe03fc20a, 0x85dc, 0x406e,  0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+#define LINUX_EFI_LOADER_ENTRY_GUID            EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf,  0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
 
 typedef struct {
        efi_guid_t guid;
@@ -975,7 +917,6 @@ extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
 extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
 extern void efi_initialize_iomem_resources(struct resource *code_resource,
                struct resource *data_resource, struct resource *bss_resource);
-extern void efi_get_time(struct timespec *now);
 extern void efi_reserve_boot_services(void);
 extern int efi_get_fdt_params(struct efi_fdt_params *params);
 extern struct kobject *efi_kobj;
@@ -1465,4 +1406,55 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
                           unsigned long size);
 
 bool efi_runtime_disabled(void);
+extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
+
+/*
+ * Arch code can implement the following three template macros, avoiding
+ * repetition for the void/non-void return cases of {__,}efi_call_virt():
+ *
+ *  * arch_efi_call_virt_setup()
+ *
+ *    Sets up the environment for the call (e.g. switching page tables,
+ *    allowing kernel-mode use of floating point, if required).
+ *
+ *  * arch_efi_call_virt()
+ *
+ *    Performs the call. The last expression in the macro must be the call
+ *    itself, allowing the logic to be shared by the void and non-void
+ *    cases.
+ *
+ *  * arch_efi_call_virt_teardown()
+ *
+ *    Restores the usual kernel environment once the call has returned.
+ */
+
+#define efi_call_virt_pointer(p, f, args...)                           \
+({                                                                     \
+       efi_status_t __s;                                               \
+       unsigned long __flags;                                          \
+                                                                       \
+       arch_efi_call_virt_setup();                                     \
+                                                                       \
+       local_save_flags(__flags);                                      \
+       __s = arch_efi_call_virt(p, f, args);                           \
+       efi_call_virt_check_flags(__flags, __stringify(f));             \
+                                                                       \
+       arch_efi_call_virt_teardown();                                  \
+                                                                       \
+       __s;                                                            \
+})
+
+#define __efi_call_virt_pointer(p, f, args...)                         \
+({                                                                     \
+       unsigned long __flags;                                          \
+                                                                       \
+       arch_efi_call_virt_setup();                                     \
+                                                                       \
+       local_save_flags(__flags);                                      \
+       arch_efi_call_virt(p, f, args);                                 \
+       efi_call_virt_check_flags(__flags, __stringify(f));             \
+                                                                       \
+       arch_efi_call_virt_teardown();                                  \
+})
+
 #endif /* _LINUX_EFI_H */
index 7abf674c388c488ab0a0b97ea63a689d93af3d4e..61004413dc649594ca17b2d0453678d1b2cc621f 100644 (file)
@@ -126,42 +126,6 @@ struct extcon_dev {
        struct device_attribute *d_attrs_muex;
 };
 
-/**
- * struct extcon_cable - An internal data for each cable of extcon device.
- * @edev:              The extcon device
- * @cable_index:       Index of this cable in the edev
- * @attr_g:            Attribute group for the cable
- * @attr_name:         "name" sysfs entry
- * @attr_state:                "state" sysfs entry
- * @attrs:             Array pointing to attr_name and attr_state for attr_g
- */
-struct extcon_cable {
-       struct extcon_dev *edev;
-       int cable_index;
-
-       struct attribute_group attr_g;
-       struct device_attribute attr_name;
-       struct device_attribute attr_state;
-
-       struct attribute *attrs[3]; /* to be fed to attr_g.attrs */
-};
-
-/**
- * struct extcon_specific_cable_nb - An internal data for
- *                                  extcon_register_interest().
- * @user_nb:           user provided notifier block for events from
- *                     a specific cable.
- * @cable_index:       the target cable.
- * @edev:              the target extcon device.
- * @previous_value:    the saved previous event value.
- */
-struct extcon_specific_cable_nb {
-       struct notifier_block *user_nb;
-       int cable_index;
-       struct extcon_dev *edev;
-       unsigned long previous_value;
-};
-
 #if IS_ENABLED(CONFIG_EXTCON)
 
 /*
@@ -201,29 +165,12 @@ extern int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state);
 
 /*
  * get/set_cable_state access each bit of the 32b encoded state value.
- * They are used to access the status of each cable based on the cable_name.
+ * They are used to access the status of each cable based on the cable id.
  */
 extern int extcon_get_cable_state_(struct extcon_dev *edev, unsigned int id);
 extern int extcon_set_cable_state_(struct extcon_dev *edev, unsigned int id,
                                   bool cable_state);
 
-extern int extcon_get_cable_state(struct extcon_dev *edev,
-                                 const char *cable_name);
-extern int extcon_set_cable_state(struct extcon_dev *edev,
-                                 const char *cable_name, bool cable_state);
-
-/*
- * Following APIs are for notifiees (those who want to be notified)
- * to register a callback for events from a specific cable of the extcon.
- * Notifiees are the connected device drivers wanting to get notified by
- * a specific external port of a connection device.
- */
-extern int extcon_register_interest(struct extcon_specific_cable_nb *obj,
-                                   const char *extcon_name,
-                                   const char *cable_name,
-                                   struct notifier_block *nb);
-extern int extcon_unregister_interest(struct extcon_specific_cable_nb *nb);
-
 /*
  * Following APIs are to monitor every action of a notifier.
  * Registrar gets notified for every external port of a connection device.
@@ -235,6 +182,12 @@ extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
                                    struct notifier_block *nb);
 extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
                                    struct notifier_block *nb);
+extern int devm_extcon_register_notifier(struct device *dev,
+                               struct extcon_dev *edev, unsigned int id,
+                               struct notifier_block *nb);
+extern void devm_extcon_unregister_notifier(struct device *dev,
+                               struct extcon_dev *edev, unsigned int id,
+                               struct notifier_block *nb);
 
 /*
  * Following API get the extcon device from devicetree.
@@ -246,6 +199,7 @@ extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
 /* Following API to get information of extcon device */
 extern const char *extcon_get_edev_name(struct extcon_dev *edev);
 
+
 #else /* CONFIG_EXTCON */
 static inline int extcon_dev_register(struct extcon_dev *edev)
 {
@@ -306,18 +260,6 @@ static inline int extcon_set_cable_state_(struct extcon_dev *edev,
        return 0;
 }
 
-static inline int extcon_get_cable_state(struct extcon_dev *edev,
-                       const char *cable_name)
-{
-       return 0;
-}
-
-static inline int extcon_set_cable_state(struct extcon_dev *edev,
-                       const char *cable_name, int state)
-{
-       return 0;
-}
-
 static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
 {
        return NULL;
@@ -337,19 +279,16 @@ static inline int extcon_unregister_notifier(struct extcon_dev *edev,
        return 0;
 }
 
-static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
-                                          const char *extcon_name,
-                                          const char *cable_name,
-                                          struct notifier_block *nb)
+static inline int devm_extcon_register_notifier(struct device *dev,
+                               struct extcon_dev *edev, unsigned int id,
+                               struct notifier_block *nb)
 {
-       return 0;
+       return -ENOSYS;
 }
 
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
-                                                   *obj)
-{
-       return 0;
-}
+static inline  void devm_extcon_unregister_notifier(struct device *dev,
+                               struct extcon_dev *edev, unsigned int id,
+                               struct notifier_block *nb) { }
 
 static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
                                                            int index)
@@ -357,4 +296,28 @@ static inline struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
        return ERR_PTR(-ENODEV);
 }
 #endif /* CONFIG_EXTCON */
+
+/*
+ * Following structure and API are deprecated. EXTCON remains the function
+ * definition to prevent the build break.
+ */
+struct extcon_specific_cable_nb {
+       struct notifier_block *user_nb;
+       int cable_index;
+       struct extcon_dev *edev;
+       unsigned long previous_value;
+};
+
+static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
+                       const char *extcon_name, const char *cable_name,
+                       struct notifier_block *nb)
+{
+       return -EINVAL;
+}
+
+static inline int extcon_unregister_interest(struct extcon_specific_cable_nb
+                                                   *obj)
+{
+       return -EINVAL;
+}
 #endif /* __LINUX_EXTCON_H__ */
index 53c60806bcfb6d28cdb209aa41a619fee928513f..ac85f2061351996e871da73a26fd0dcf81293bad 100644 (file)
@@ -53,6 +53,7 @@ struct adc_jack_cond {
  *                     milli-seconds after the interrupt occurs. You may
  *                     describe such delays with @handling_delay_ms, which
  *                     is rounded-off by jiffies.
+ * @wakeup_source:     flag to wake up the system for extcon events.
  */
 struct adc_jack_pdata {
        const char *name;
@@ -65,6 +66,7 @@ struct adc_jack_pdata {
 
        unsigned long irq_flags;
        unsigned long handling_delay_ms; /* in ms */
+       bool wakeup_source;
 };
 
 #endif /* _EXTCON_ADC_JACK_H */
index 2056e9fd0138357726864b61c6af1dea9fa11981..1de1b3f6fb76d9579f4b87ef429b5e7dbe607ed8 100644 (file)
@@ -81,8 +81,6 @@ struct fence {
        unsigned long flags;
        ktime_t timestamp;
        int status;
-       struct list_head child_list;
-       struct list_head active_list;
 };
 
 enum fence_flag_bits {
index 6fc31ef1da2d8e9efe5d91b8e4349ed4fe0dd809..8f74f3d61894a4ad58b5cc3457f68f318a84c970 100644 (file)
@@ -467,7 +467,11 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 }
 #endif /* CONFIG_DEBUG_SET_MODULE_RONX */
 
-int sk_filter(struct sock *sk, struct sk_buff *skb);
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
+static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
+{
+       return sk_filter_trim_cap(sk, skb, 1);
+}
 
 struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
 void bpf_prog_free(struct bpf_prog *fp);
index 419fb9e03447aff8aef55934e89bbd844a28d7e7..f0a7a0320300bae6eca05e27a2083f80ed54282e 100644 (file)
@@ -94,7 +94,7 @@ static inline int split_huge_page(struct page *page)
 void deferred_split_huge_page(struct page *page);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long address, bool freeze);
+               unsigned long address, bool freeze, struct page *page);
 
 #define split_huge_pmd(__vma, __pmd, __address)                                \
        do {                                                            \
@@ -102,7 +102,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                if (pmd_trans_huge(*____pmd)                            \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
-                                               false);                 \
+                                               false, NULL);           \
        }  while (0)
 
 
index 99403b19092f2da0ecadaffd21e9fc8065539434..228bd44efa4c6efecd3af2e3ec16be8bd02d3d1b 100644 (file)
@@ -223,6 +223,7 @@ struct st_sensor_settings {
  * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
  * @tf: Transfer function structure used by I/O operations.
  * @tb: Transfer buffers and mutex used by I/O operations.
+ * @edge_irq: the IRQ triggers on edges and need special handling.
  * @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
  * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
  */
@@ -250,14 +251,13 @@ struct st_sensor_data {
        const struct st_sensor_transfer_function *tf;
        struct st_sensor_transfer_buffer tb;
 
+       bool edge_irq;
        bool hw_irq_trigger;
        s64 hw_timestamp;
 };
 
 #ifdef CONFIG_IIO_BUFFER
 irqreturn_t st_sensors_trigger_handler(int irq, void *p);
-
-int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf);
 #endif
 
 #ifdef CONFIG_IIO_TRIGGER
@@ -287,7 +287,7 @@ int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable);
 
 int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable);
 
-void st_sensors_power_enable(struct iio_dev *indio_dev);
+int st_sensors_power_enable(struct iio_dev *indio_dev);
 
 void st_sensors_power_disable(struct iio_dev *indio_dev);
 
index 7c29cb0124ae2f0e0a97764028f74985bf9e879f..854e2dad1e0df71af7f0d29f443e07ed77b3f24f 100644 (file)
@@ -312,13 +312,8 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
                },                                                      \
 }
 
-/**
- * iio_get_time_ns() - utility function to get a time stamp for events etc
- **/
-static inline s64 iio_get_time_ns(void)
-{
-       return ktime_get_real_ns();
-}
+s64 iio_get_time_ns(const struct iio_dev *indio_dev);
+unsigned int iio_get_time_res(const struct iio_dev *indio_dev);
 
 /* Device operating modes */
 #define INDIO_DIRECT_MODE              0x01
@@ -497,6 +492,7 @@ struct iio_buffer_setup_ops {
  * @chan_attr_group:   [INTERN] group for all attrs in base directory
  * @name:              [DRIVER] name of the device.
  * @info:              [DRIVER] callbacks and constant info from driver
+ * @clock_id:          [INTERN] timestamping clock posix identifier
  * @info_exist_lock:   [INTERN] lock to prevent use during removal
  * @setup_ops:         [DRIVER] callbacks to call before and after buffer
  *                     enable/disable
@@ -537,6 +533,7 @@ struct iio_dev {
        struct attribute_group          chan_attr_group;
        const char                      *name;
        const struct iio_info           *info;
+       clockid_t                       clock_id;
        struct mutex                    info_exist_lock;
        const struct iio_buffer_setup_ops       *setup_ops;
        struct cdev                     chrdev;
@@ -565,7 +562,7 @@ extern struct bus_type iio_bus_type;
 
 /**
  * iio_device_put() - reference counted deallocation of struct device
- * @indio_dev:                 IIO device structure containing the device
+ * @indio_dev: IIO device structure containing the device
  **/
 static inline void iio_device_put(struct iio_dev *indio_dev)
 {
@@ -573,6 +570,15 @@ static inline void iio_device_put(struct iio_dev *indio_dev)
                put_device(&indio_dev->dev);
 }
 
+/**
+ * iio_device_get_clock() - Retrieve current timestamping clock for the device
+ * @indio_dev: IIO device structure containing the device
+ */
+static inline clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
+{
+       return indio_dev->clock_id;
+}
+
 /**
  * dev_to_iio_dev() - Get IIO device struct from a device struct
  * @dev:               The device embedded in the IIO device
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
new file mode 100644 (file)
index 0000000..23ca415
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Industrial I/O software device interface
+ *
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __IIO_SW_DEVICE
+#define __IIO_SW_DEVICE
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/configfs.h>
+
+#define module_iio_sw_device_driver(__iio_sw_device_type) \
+       module_driver(__iio_sw_device_type, iio_register_sw_device_type, \
+                     iio_unregister_sw_device_type)
+
+struct iio_sw_device_ops;
+
+struct iio_sw_device_type {
+       const char *name;
+       struct module *owner;
+       const struct iio_sw_device_ops *ops;
+       struct list_head list;
+       struct config_group *group;
+};
+
+struct iio_sw_device {
+       struct iio_dev *device;
+       struct iio_sw_device_type *device_type;
+       struct config_group group;
+};
+
+struct iio_sw_device_ops {
+       struct iio_sw_device* (*probe)(const char *);
+       int (*remove)(struct iio_sw_device *);
+};
+
+static inline
+struct iio_sw_device *to_iio_sw_device(struct config_item *item)
+{
+       return container_of(to_config_group(item), struct iio_sw_device,
+                           group);
+}
+
+int iio_register_sw_device_type(struct iio_sw_device_type *dt);
+void iio_unregister_sw_device_type(struct iio_sw_device_type *dt);
+
+struct iio_sw_device *iio_sw_device_create(const char *, const char *);
+void iio_sw_device_destroy(struct iio_sw_device *);
+
+int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt);
+void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
+
+static inline
+void iio_swd_group_init_type_name(struct iio_sw_device *d,
+                                 const char *name,
+                                 struct config_item_type *type)
+{
+#ifdef CONFIG_CONFIGFS_FS
+       config_group_init_type_name(&d->group, name, type);
+#endif
+}
+
+#endif /* __IIO_SW_DEVICE */
index 94aa10ffe15615725e69417945a1ea5041c93384..c42082112ec8d59c052041a44fbb339f0adf9e9e 100644 (file)
@@ -451,6 +451,7 @@ extern int panic_on_oops;
 extern int panic_on_unrecovered_nmi;
 extern int panic_on_io_nmi;
 extern int panic_on_warn;
+extern int sysctl_panic_on_rcu_stall;
 extern int sysctl_panic_on_stackoverflow;
 
 extern bool crash_kexec_post_notifiers;
index 5356f4d661a721ba0446b1183e2a834f3bf3b56f..5183138aa932da88c83177b9f8372bd9c3437e13 100644 (file)
@@ -678,6 +678,16 @@ static inline bool hlist_fake(struct hlist_node *h)
        return h->pprev == &h->next;
 }
 
+/*
+ * Check whether the node is the only node of the head without
+ * accessing head:
+ */
+static inline bool
+hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
+{
+       return !n->next && n->pprev == &h->first;
+}
+
 /*
  * Move a list from one list head to another. Fixup the pprev
  * reference of the first entry if it exists.
index a805474df4abd8c70c83bdbd3b383ad9c02eca1c..56e6069d245271539f14bf34c204122665e28ab6 100644 (file)
@@ -97,6 +97,11 @@ enum mem_cgroup_events_target {
 #define MEM_CGROUP_ID_SHIFT    16
 #define MEM_CGROUP_ID_MAX      USHRT_MAX
 
+struct mem_cgroup_id {
+       int id;
+       atomic_t ref;
+};
+
 struct mem_cgroup_stat_cpu {
        long count[MEMCG_NR_STAT];
        unsigned long events[MEMCG_NR_EVENTS];
@@ -172,6 +177,9 @@ enum memcg_kmem_state {
 struct mem_cgroup {
        struct cgroup_subsys_state css;
 
+       /* Private memcg ID. Used to ID objects that outlive the cgroup */
+       struct mem_cgroup_id id;
+
        /* Accounted resources */
        struct page_counter memory;
        struct page_counter swap;
@@ -330,22 +338,9 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
        if (mem_cgroup_disabled())
                return 0;
 
-       return memcg->css.id;
-}
-
-/**
- * mem_cgroup_from_id - look up a memcg from an id
- * @id: the id to look up
- *
- * Caller must hold rcu_read_lock() and use css_tryget() as necessary.
- */
-static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
-{
-       struct cgroup_subsys_state *css;
-
-       css = css_from_id(id, &memory_cgrp_subsys);
-       return mem_cgroup_from_css(css);
+       return memcg->id.id;
 }
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id);
 
 /**
  * parent_mem_cgroup - find the accounting parent of a memcg
index 80776d0c52dc9c48b7a02842caacbf9699e73507..fd72ecf0ce9fe55e3e0b1be1bc1ea6231c4378f2 100644 (file)
@@ -629,6 +629,7 @@ struct mlx5_cmd_work_ent {
        void                   *uout;
        int                     uout_size;
        mlx5_cmd_cbk_t          callback;
+       struct delayed_work     cb_timeout_work;
        void                   *context;
        int                     idx;
        struct completion       done;
index ca3e517980a0a2d3d58c6566350456f828e4446f..917f2b6a0cdee69a111e17312715483b287f7b2c 100644 (file)
@@ -594,6 +594,9 @@ struct vm_special_mapping {
        int (*fault)(const struct vm_special_mapping *sm,
                     struct vm_area_struct *vma,
                     struct vm_fault *vmf);
+
+       int (*mremap)(const struct vm_special_mapping *sm,
+                    struct vm_area_struct *new_vma);
 };
 
 enum tlb_flush_reason {
index 6e4c645e1c0d473821551e8ca4252b52c894578f..ed84c07f6a510f48e24d53e0d54afaeb50a182f3 100644 (file)
@@ -657,4 +657,20 @@ struct ulpi_device_id {
        kernel_ulong_t driver_data;
 };
 
+/**
+ * struct fsl_mc_device_id - MC object device identifier
+ * @vendor: vendor ID
+ * @obj_type: MC object type
+ *
+ * Type of entries in the "device ID" table for MC object devices supported by
+ * a MC object device driver. The last entry of the table has vendor set to 0x0
+ */
+struct fsl_mc_device_id {
+       __u16 vendor;
+       const char obj_type[16];
+};
+
+
 #endif /* LINUX_MOD_DEVICETABLE_H */
index f45929ce815725d868261e9a2585ac53d0c8f128..da4b33bea9828857a28ab9060cbfe2d491cd3058 100644 (file)
@@ -4145,6 +4145,13 @@ static inline void netif_keep_dst(struct net_device *dev)
        dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
 }
 
+/* return true if dev can't cope with mtu frames that need vlan tag insertion */
+static inline bool netif_reduces_vlan_mtu(struct net_device *dev)
+{
+       /* TODO: reserve and use an additional IFF bit, if we get more users */
+       return dev->priv_flags & IFF_MACSEC;
+}
+
 extern struct pernet_operations __net_initdata loopback_net_ops;
 
 /* Logging, debugging and troubleshooting/diagnostic helpers. */
index 9bb77d3ed6e0c5a35f8c77c0fb20821a237984ea..c2256d74654333b9103fab444c4e318e2c7d422a 100644 (file)
@@ -74,7 +74,7 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell)
 {
 }
 
-static inline char *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
+static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
 {
        return ERR_PTR(-ENOSYS);
 }
index 74eb28cadbef032017ae12fe4d11ab4f70996d63..15c43f076b2307c4bfc53c0f0f7cec03e2e6f844 100644 (file)
@@ -1009,10 +1009,13 @@ static inline int of_get_available_child_count(const struct device_node *np)
 #endif
 
 typedef int (*of_init_fn_2)(struct device_node *, struct device_node *);
+typedef int (*of_init_fn_1_ret)(struct device_node *);
 typedef void (*of_init_fn_1)(struct device_node *);
 
 #define OF_DECLARE_1(table, name, compat, fn) \
                _OF_DECLARE(table, name, compat, fn, of_init_fn_1)
+#define OF_DECLARE_1_RET(table, name, compat, fn) \
+               _OF_DECLARE(table, name, compat, fn, of_init_fn_1_ret)
 #define OF_DECLARE_2(table, name, compat, fn) \
                _OF_DECLARE(table, name, compat, fn, of_init_fn_2)
 
index 84f542df7ff5cca0932f02b6629f8229244c2d1a..1c7eec09e5eba7ae8c0cc8e82172791f992bb361 100644 (file)
@@ -136,14 +136,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
         * used as a pointer.  If the compiler generates a separate fetch
         * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
         * between contaminating the pointer value, meaning that
-        * ACCESS_ONCE() is required when fetching it.
-        *
-        * Also, we need a data dependency barrier to be paired with
-        * smp_store_release() in __percpu_ref_switch_to_percpu().
-        *
-        * Use lockless deref which contains both.
+        * READ_ONCE() is required when fetching it.
         */
-       percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
+       percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
+
+       /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */
+       smp_read_barrier_depends();
 
        /*
         * Theoretically, the following could test just ATOMIC; however,
index 1a827cecd62fa36b8f7cec08a11deb7d65d000e2..7921f4f20a5890a96d2099c05a105d51a3ec2c3d 100644 (file)
@@ -517,6 +517,11 @@ struct swevent_hlist {
 struct perf_cgroup;
 struct ring_buffer;
 
+struct pmu_event_list {
+       raw_spinlock_t          lock;
+       struct list_head        list;
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */
@@ -675,6 +680,7 @@ struct perf_event {
        int                             cgrp_defer_enabled;
 #endif
 
+       struct list_head                sb_list;
 #endif /* CONFIG_PERF_EVENTS */
 };
 
@@ -1074,7 +1080,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
                   u32 max_stack, bool crosstask, bool add_mark);
-extern int get_callchain_buffers(void);
+extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);
 
 extern int sysctl_perf_event_max_stack;
@@ -1326,6 +1332,13 @@ struct perf_pmu_events_attr {
        const char *event_str;
 };
 
+struct perf_pmu_events_ht_attr {
+       struct device_attribute                 attr;
+       u64                                     id;
+       const char                              *event_str_ht;
+       const char                              *event_str_noht;
+};
+
 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
                              char *page);
 
index a810f2a18842e51c996b6216e73178141ecb6962..f08b67238b58d93bcb46447122f49281d45d53c0 100644 (file)
 
 struct phy;
 
+enum phy_mode {
+       PHY_MODE_INVALID,
+       PHY_MODE_USB_HOST,
+       PHY_MODE_USB_DEVICE,
+       PHY_MODE_USB_OTG,
+};
+
 /**
  * struct phy_ops - set of function pointers for performing phy operations
  * @init: operation to be performed for initializing phy
  * @exit: operation to be performed while exiting
  * @power_on: powering on the phy
  * @power_off: powering off the phy
+ * @set_mode: set the mode of the phy
  * @owner: the module owner containing the ops
  */
 struct phy_ops {
@@ -35,6 +43,7 @@ struct phy_ops {
        int     (*exit)(struct phy *phy);
        int     (*power_on)(struct phy *phy);
        int     (*power_off)(struct phy *phy);
+       int     (*set_mode)(struct phy *phy, enum phy_mode mode);
        struct module *owner;
 };
 
@@ -126,6 +135,7 @@ int phy_init(struct phy *phy);
 int phy_exit(struct phy *phy);
 int phy_power_on(struct phy *phy);
 int phy_power_off(struct phy *phy);
+int phy_set_mode(struct phy *phy, enum phy_mode mode);
 static inline int phy_get_bus_width(struct phy *phy)
 {
        return phy->attrs.bus_width;
@@ -233,6 +243,13 @@ static inline int phy_power_off(struct phy *phy)
        return -ENOSYS;
 }
 
+static inline int phy_set_mode(struct phy *phy, enum phy_mode mode)
+{
+       if (!phy)
+               return 0;
+       return -ENOSYS;
+}
+
 static inline int phy_get_bus_width(struct phy *phy)
 {
        return -ENOSYS;
diff --git a/include/linux/platform_data/sht3x.h b/include/linux/platform_data/sht3x.h
new file mode 100644 (file)
index 0000000..2e5eea3
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2016 Sensirion AG, Switzerland
+ * Author: David Frey <david.frey@sensirion.com>
+ * Author: Pascal Sachs <pascal.sachs@sensirion.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SHT3X_H_
+#define __SHT3X_H_
+
+struct sht3x_platform_data {
+       bool blocking_io;
+       bool high_precision;
+};
+#endif /* __SHT3X_H_ */
index 5b5a80cc59265882813cddba568b0875797e5e6b..c818772d9f9d13538309226a89894b03a78c1aa4 100644 (file)
@@ -43,10 +43,8 @@ struct posix_acl_entry {
 };
 
 struct posix_acl {
-       union {
-               atomic_t                a_refcount;
-               struct rcu_head         a_rcu;
-       };
+       atomic_t                a_refcount;
+       struct rcu_head         a_rcu;
        unsigned int            a_count;
        struct posix_acl_entry  a_entries[0];
 };
index f4da695fd615501952e584c502a69bdcb763438b..f136b22c7772754ca2e5945ba2c9929e39c4bbf5 100644 (file)
@@ -108,11 +108,14 @@ struct va_format {
  * Dummy printk for disabled debugging statements to use whilst maintaining
  * gcc's format checking.
  */
-#define no_printk(fmt, ...)                    \
-do {                                           \
-       if (0)                                  \
-               printk(fmt, ##__VA_ARGS__);     \
-} while (0)
+#define no_printk(fmt, ...)                            \
+({                                                     \
+       do {                                            \
+               if (0)                                  \
+                       printk(fmt, ##__VA_ARGS__);     \
+       } while (0);                                    \
+       0;                                              \
+})
 
 #ifdef CONFIG_EARLY_PRINTK
 extern asmlinkage __printf(1, 2)
@@ -309,20 +312,24 @@ extern asmlinkage void dump_stack(void) __cold;
 #define printk_once(fmt, ...)                                  \
 ({                                                             \
        static bool __print_once __read_mostly;                 \
+       bool __ret_print_once = !__print_once;                  \
                                                                \
        if (!__print_once) {                                    \
                __print_once = true;                            \
                printk(fmt, ##__VA_ARGS__);                     \
        }                                                       \
+       unlikely(__ret_print_once);                             \
 })
 #define printk_deferred_once(fmt, ...)                         \
 ({                                                             \
        static bool __print_once __read_mostly;                 \
+       bool __ret_print_once = !__print_once;                  \
                                                                \
        if (!__print_once) {                                    \
                __print_once = true;                            \
                printk_deferred(fmt, ##__VA_ARGS__);            \
        }                                                       \
+       unlikely(__ret_print_once);                             \
 })
 #else
 #define printk_once(fmt, ...)                                  \
index cb4b7e8cee81a40cbfedf0a781c7f701ed9e6ee0..eca6f626c16e7d513489341327588846e1239ffb 100644 (file)
@@ -407,6 +407,7 @@ static inline __must_check
 void **radix_tree_iter_retry(struct radix_tree_iter *iter)
 {
        iter->next_index = iter->index;
+       iter->tags = 0;
        return NULL;
 }
 
index e47e533742b5e35fe5ec21263ae966f64ff1ff4a..3d6e9815cd854d6d093e8082df312b63790b3d11 100644 (file)
@@ -95,27 +95,27 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
 #ifdef CONFIG_ARCH_RANDOM
 # include <asm/archrandom.h>
 #else
-static inline int arch_get_random_long(unsigned long *v)
+static inline bool arch_get_random_long(unsigned long *v)
 {
        return 0;
 }
-static inline int arch_get_random_int(unsigned int *v)
+static inline bool arch_get_random_int(unsigned int *v)
 {
        return 0;
 }
-static inline int arch_has_random(void)
+static inline bool arch_has_random(void)
 {
        return 0;
 }
-static inline int arch_get_random_seed_long(unsigned long *v)
+static inline bool arch_get_random_seed_long(unsigned long *v)
 {
        return 0;
 }
-static inline int arch_get_random_seed_int(unsigned int *v)
+static inline bool arch_get_random_seed_int(unsigned int *v)
 {
        return 0;
 }
-static inline int arch_has_random_seed(void)
+static inline bool arch_has_random_seed(void)
 {
        return 0;
 }
index 5f1533e3d03206d2d64f67a6a1db51b785f0a1fb..3bc5de08c0b785e5755e05a611356ac8ea4ae21e 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/ktime.h>
+#include <linux/irqflags.h>
 
 #include <asm/barrier.h>
 
@@ -379,12 +380,13 @@ static inline void rcu_init_nohz(void)
  * in the inner idle loop.
  *
  * This macro provides the way out:  RCU_NONIDLE(do_something_with_RCU())
- * will tell RCU that it needs to pay attending, invoke its argument
- * (in this example, a call to the do_something_with_RCU() function),
+ * will tell RCU that it needs to pay attention, invoke its argument
+ * (in this example, calling the do_something_with_RCU() function),
  * and then tell RCU to go back to ignoring this CPU.  It is permissible
- * to nest RCU_NONIDLE() wrappers, but the nesting level is currently
- * quite limited.  If deeper nesting is required, it will be necessary
- * to adjust DYNTICK_TASK_NESTING_VALUE accordingly.
+ * to nest RCU_NONIDLE() wrappers, but not indefinitely (but the limit is
+ * on the order of a million or so, even on 32-bit systems).  It is
+ * not legal to block within RCU_NONIDLE(), nor is it permissible to
+ * transfer control either into or out of RCU_NONIDLE()'s statement.
  */
 #define RCU_NONIDLE(a) \
        do { \
@@ -649,7 +651,16 @@ static inline void rcu_preempt_sleep_check(void)
  * please be careful when making changes to rcu_assign_pointer() and the
  * other macros that it invokes.
  */
-#define rcu_assign_pointer(p, v) smp_store_release(&p, RCU_INITIALIZER(v))
+#define rcu_assign_pointer(p, v)                                             \
+({                                                                           \
+       uintptr_t _r_a_p__v = (uintptr_t)(v);                                 \
+                                                                             \
+       if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL)        \
+               WRITE_ONCE((p), (typeof(p))(_r_a_p__v));                      \
+       else                                                                  \
+               smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \
+       _r_a_p__v;                                                            \
+})
 
 /**
  * rcu_access_pointer() - fetch RCU pointer with no dereferencing
index 49eb4f8ebac9636a394dbe097532c70a08e056d7..2b0fad83683f793959fbe330045abb7c684edfcf 100644 (file)
@@ -158,7 +158,7 @@ struct anon_vma *page_get_anon_vma(struct page *page);
 /*
  * rmap interfaces called when adding or removing pte of page
  */
-void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_move_anon_rmap(struct page *, struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                unsigned long, bool);
 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
index d37fbb34d06fed4494b3152300d71aba23663f63..dd1d142503404da2775808630bda2021a597229e 100644 (file)
@@ -23,10 +23,11 @@ struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
 #include <linux/rwsem-spinlock.h> /* use a generic implementation */
+#define __RWSEM_INIT_COUNT(name)       .count = RWSEM_UNLOCKED_VALUE
 #else
 /* All arch specific implementations share the same struct */
 struct rw_semaphore {
-       long count;
+       atomic_long_t count;
        struct list_head wait_list;
        raw_spinlock_t wait_lock;
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
@@ -54,9 +55,10 @@ extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
 /* In all implementations count != 0 means locked */
 static inline int rwsem_is_locked(struct rw_semaphore *sem)
 {
-       return sem->count != 0;
+       return atomic_long_read(&sem->count) != 0;
 }
 
+#define __RWSEM_INIT_COUNT(name)       .count = ATOMIC_LONG_INIT(RWSEM_UNLOCKED_VALUE)
 #endif
 
 /* Common initializer macros and functions */
@@ -74,7 +76,7 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 #endif
 
 #define __RWSEM_INITIALIZER(name)                              \
-       { .count = RWSEM_UNLOCKED_VALUE,                        \
+       { __RWSEM_INIT_COUNT(name),                             \
          .wait_list = LIST_HEAD_INIT((name).wait_list),        \
          .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
          __RWSEM_OPT_INIT(name)                                \
index 253538f29ade890dcd48d1708fcd15ae362653be..d99218a1e04370683dd239de59c38e48a04d2eb7 100644 (file)
@@ -219,9 +219,10 @@ extern void proc_sched_set_task(struct task_struct *p);
 #define TASK_WAKING            256
 #define TASK_PARKED            512
 #define TASK_NOLOAD            1024
-#define TASK_STATE_MAX         2048
+#define TASK_NEW               2048
+#define TASK_STATE_MAX         4096
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -2139,6 +2140,9 @@ static inline void put_task_struct(struct task_struct *t)
                __put_task_struct(t);
 }
 
+struct task_struct *task_rcu_dereference(struct task_struct **ptask);
+struct task_struct *try_get_task_struct(struct task_struct **ptask);
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void task_cputime(struct task_struct *t,
                         cputime_t *utime, cputime_t *stime);
index 48ec7651989b093fc015e44b59657c7000e11012..923266cd294a33c8d98cbe274df0991f6647dafa 100644 (file)
@@ -111,6 +111,7 @@ struct uart_8250_port {
                                                 *   if no_console_suspend
                                                 */
        unsigned char           probe;
+       struct mctrl_gpios      *gpios;
 #define UART_PROBE_RSA (1 << 0)
 
        /*
index a3d7c0d4a03e75016023184a5fea18c91d9d8dbc..2f44e20136545162d15ee9a39c2eae38352580d1 100644 (file)
@@ -352,9 +352,15 @@ struct earlycon_id {
 extern const struct earlycon_id __earlycon_table[];
 extern const struct earlycon_id __earlycon_table_end[];
 
+#if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
+#define EARLYCON_USED_OR_UNUSED        __used
+#else
+#define EARLYCON_USED_OR_UNUSED        __maybe_unused
+#endif
+
 #define OF_EARLYCON_DECLARE(_name, compat, fn)                         \
        static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
-            __used __section(__earlycon_table)                         \
+            EARLYCON_USED_OR_UNUSED __section(__earlycon_table)        \
                = { .name = __stringify(_name),                         \
                    .compatible = compat,                               \
                    .setup = fn  }
index d9b436f099258c99b761745fdc35a29910b6dfe5..e0e1597ef9e6cb9b464839f88c909cc9e60e1976 100644 (file)
@@ -156,6 +156,7 @@ struct sfi_device_table_entry {
 #define SFI_DEV_TYPE_UART      2
 #define SFI_DEV_TYPE_HSI       3
 #define SFI_DEV_TYPE_IPC       4
+#define SFI_DEV_TYPE_SD                5
 
        u8      host_num;       /* attached to host 0, 1...*/
        u16     addr;
index ee38a41274759f279be1c0752a7fab63fac517c8..f39b37180c414deb6d71c0ab5d674f89958630c0 100644 (file)
@@ -1062,6 +1062,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
 }
 
 void __skb_get_hash(struct sk_buff *skb);
+u32 __skb_get_hash_symmetric(struct sk_buff *skb);
 u32 skb_get_poff(const struct sk_buff *skb);
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
                   const struct flow_keys *keys, int hlen);
@@ -2869,6 +2870,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
                skb->csum = csum_partial(start, len, skb->csum);
 }
 
+/**
+ *     skb_push_rcsum - push skb and update receive checksum
+ *     @skb: buffer to update
+ *     @len: length of data pushed
+ *
+ *     This function performs an skb_push on the packet and updates
+ *     the CHECKSUM_COMPLETE checksum.  It should be used on
+ *     receive path processing instead of skb_push unless you know
+ *     that the checksum difference is zero (e.g., a valid IP header)
+ *     or you are setting ip_summed to CHECKSUM_NONE.
+ */
+static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
+                                           unsigned int len)
+{
+       skb_push(skb, len);
+       skb_postpush_rcsum(skb, skb->data, len);
+       return skb->data;
+}
+
 /**
  *     pskb_trim_rcsum - trim received skb and update checksum
  *     @skb: buffer to trim
index 8b3ac0d718ebd7fd80987f6ede10725358102e1d..0d9848de677d96eb186f71c578dd6031f6c47611 100644 (file)
@@ -6,6 +6,7 @@
 #endif
 
 #include <asm/processor.h>     /* for cpu_relax() */
+#include <asm/barrier.h>
 
 /*
  * include/linux/spinlock_up.h - UP-debug version of spinlocks.
 #ifdef CONFIG_DEBUG_SPINLOCK
 #define arch_spin_is_locked(x)         ((x)->slock == 0)
 
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       smp_cond_load_acquire(&lock->slock, VAL);
+}
+
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
        lock->slock = 0;
@@ -67,6 +73,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)      ((void)(lock), 0)
+#define arch_spin_unlock_wait(lock)    do { barrier(); (void)(lock); } while (0)
 /* for sched/core.c and kernel_lock.c: */
 # define arch_spin_lock(lock)          do { barrier(); (void)(lock); } while (0)
 # define arch_spin_lock_flags(lock, flags)     do { barrier(); (void)(lock); } while (0)
@@ -79,7 +86,4 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 #define arch_read_can_lock(lock)       (((void)(lock), 1))
 #define arch_write_can_lock(lock)      (((void)(lock), 1))
 
-#define arch_spin_unlock_wait(lock) \
-               do { cpu_relax(); } while (arch_spin_is_locked(lock))
-
 #endif /* __LINUX_SPINLOCK_UP_H */
index 297f09f23896d2db41cd90a0cad37746e6ed77aa..4cea09d9420803469a991cf7038864a92982b0c0 100644 (file)
@@ -205,7 +205,20 @@ struct tm {
        int tm_yday;
 };
 
-void time_to_tm(time_t totalsecs, int offset, struct tm *result);
+void time64_to_tm(time64_t totalsecs, int offset, struct tm *result);
+
+/**
+ * time_to_tm - converts the calendar time to local broken-down time
+ *
+ * @totalsecs: the number of seconds elapsed since 00:00:00 on January 1, 1970,
+ *             Coordinated Universal Time (UTC).
+ * @offset:    offset in seconds to add to totalsecs.
+ * @result:    pointer to struct tm variable to receive broken-down time
+ */
+static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
+{
+       time64_to_tm(totalsecs, offset, result);
+}
 
 /**
  * timespec_to_ns - Convert timespec to nanoseconds
index 20ac746f3eb305905376e9743b90a2f78cf76b57..4419506b564ecc215b9550b9e1bf4ed4ccbe6e29 100644 (file)
@@ -19,7 +19,6 @@ struct timer_list {
        void                    (*function)(unsigned long);
        unsigned long           data;
        u32                     flags;
-       int                     slack;
 
 #ifdef CONFIG_TIMER_STATS
        int                     start_pid;
@@ -58,11 +57,14 @@ struct timer_list {
  * workqueue locking issues. It's not meant for executing random crap
  * with interrupts disabled. Abuse is monitored!
  */
-#define TIMER_CPUMASK          0x0007FFFF
-#define TIMER_MIGRATING                0x00080000
+#define TIMER_CPUMASK          0x0003FFFF
+#define TIMER_MIGRATING                0x00040000
 #define TIMER_BASEMASK         (TIMER_CPUMASK | TIMER_MIGRATING)
-#define TIMER_DEFERRABLE       0x00100000
+#define TIMER_DEFERRABLE       0x00080000
+#define TIMER_PINNED           0x00100000
 #define TIMER_IRQSAFE          0x00200000
+#define TIMER_ARRAYSHIFT       22
+#define TIMER_ARRAYMASK                0xFFC00000
 
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
                .entry = { .next = TIMER_ENTRY_STATIC },        \
@@ -70,7 +72,6 @@ struct timer_list {
                .expires = (_expires),                          \
                .data = (_data),                                \
                .flags = (_flags),                              \
-               .slack = -1,                                    \
                __TIMER_LOCKDEP_MAP_INITIALIZER(                \
                        __FILE__ ":" __stringify(__LINE__))     \
        }
@@ -78,9 +79,15 @@ struct timer_list {
 #define TIMER_INITIALIZER(_function, _expires, _data)          \
        __TIMER_INITIALIZER((_function), (_expires), (_data), 0)
 
+#define TIMER_PINNED_INITIALIZER(_function, _expires, _data)   \
+       __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_PINNED)
+
 #define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \
        __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE)
 
+#define TIMER_PINNED_DEFERRED_INITIALIZER(_function, _expires, _data)  \
+       __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE | TIMER_PINNED)
+
 #define DEFINE_TIMER(_name, _function, _expires, _data)                \
        struct timer_list _name =                               \
                TIMER_INITIALIZER(_function, _expires, _data)
@@ -124,8 +131,12 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
 
 #define init_timer(timer)                                              \
        __init_timer((timer), 0)
+#define init_timer_pinned(timer)                                       \
+       __init_timer((timer), TIMER_PINNED)
 #define init_timer_deferrable(timer)                                   \
        __init_timer((timer), TIMER_DEFERRABLE)
+#define init_timer_pinned_deferrable(timer)                            \
+       __init_timer((timer), TIMER_DEFERRABLE | TIMER_PINNED)
 #define init_timer_on_stack(timer)                                     \
        __init_timer_on_stack((timer), 0)
 
@@ -145,12 +156,20 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
 
 #define setup_timer(timer, fn, data)                                   \
        __setup_timer((timer), (fn), (data), 0)
+#define setup_pinned_timer(timer, fn, data)                            \
+       __setup_timer((timer), (fn), (data), TIMER_PINNED)
 #define setup_deferrable_timer(timer, fn, data)                                \
        __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
+#define setup_pinned_deferrable_timer(timer, fn, data)                 \
+       __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
 #define setup_timer_on_stack(timer, fn, data)                          \
        __setup_timer_on_stack((timer), (fn), (data), 0)
+#define setup_pinned_timer_on_stack(timer, fn, data)                   \
+       __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
 #define setup_deferrable_timer_on_stack(timer, fn, data)               \
        __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
+#define setup_pinned_deferrable_timer_on_stack(timer, fn, data)                \
+       __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
 
 /**
  * timer_pending - is a timer pending?
@@ -171,12 +190,7 @@ extern void add_timer_on(struct timer_list *timer, int cpu);
 extern int del_timer(struct timer_list * timer);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
-extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires);
-
-extern void set_timer_slack(struct timer_list *time, int slack_hz);
 
-#define TIMER_NOT_PINNED       0
-#define TIMER_PINNED           1
 /*
  * The jiffies value which is added to now, when there is no timer
  * in the timer wheel:
index 7759fc3c622d4af42753175a89b7dad0bd6898ec..6685a73736a2ecb77ce4bbec47e85b9dc207c332 100644 (file)
        do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0)
 
 /* Definitions for online/offline exerciser. */
+bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes,
+                    unsigned long *sum_offl, int *min_onl, int *max_onl);
+bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
+                   unsigned long *sum_onl, int *min_onl, int *max_onl);
 int torture_onoff_init(long ooholdoff, long oointerval);
 void torture_onoff_stats(void);
 bool torture_onoff_failures(void);
index fefe8b06a63dbc15c28b9c09a76e14c03f6ae1e2..612dbdfa388ed29db4620149954b7ee46603d393 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/workqueue.h>
 #include <linux/usb/ch9.h>
 
+#define UDC_TRACE_STR_MAX      512
+
 struct usb_ep;
 
 /**
@@ -228,307 +230,49 @@ struct usb_ep {
 
 /*-------------------------------------------------------------------------*/
 
-/**
- * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint
- * @ep:the endpoint being configured
- * @maxpacket_limit:value of maximum packet size limit
- *
- * This function should be used only in UDC drivers to initialize endpoint
- * (usually in probe function).
- */
+#if IS_ENABLED(CONFIG_USB_GADGET)
+void usb_ep_set_maxpacket_limit(struct usb_ep *ep, unsigned maxpacket_limit);
+int usb_ep_enable(struct usb_ep *ep);
+int usb_ep_disable(struct usb_ep *ep);
+struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, gfp_t gfp_flags);
+void usb_ep_free_request(struct usb_ep *ep, struct usb_request *req);
+int usb_ep_queue(struct usb_ep *ep, struct usb_request *req, gfp_t gfp_flags);
+int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req);
+int usb_ep_set_halt(struct usb_ep *ep);
+int usb_ep_clear_halt(struct usb_ep *ep);
+int usb_ep_set_wedge(struct usb_ep *ep);
+int usb_ep_fifo_status(struct usb_ep *ep);
+void usb_ep_fifo_flush(struct usb_ep *ep);
+#else
 static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
-                                             unsigned maxpacket_limit)
-{
-       ep->maxpacket_limit = maxpacket_limit;
-       ep->maxpacket = maxpacket_limit;
-}
-
-/**
- * usb_ep_enable - configure endpoint, making it usable
- * @ep:the endpoint being configured.  may not be the endpoint named "ep0".
- *     drivers discover endpoints through the ep_list of a usb_gadget.
- *
- * When configurations are set, or when interface settings change, the driver
- * will enable or disable the relevant endpoints.  while it is enabled, an
- * endpoint may be used for i/o until the driver receives a disconnect() from
- * the host or until the endpoint is disabled.
- *
- * the ep0 implementation (which calls this routine) must ensure that the
- * hardware capabilities of each endpoint match the descriptor provided
- * for it.  for example, an endpoint named "ep2in-bulk" would be usable
- * for interrupt transfers as well as bulk, but it likely couldn't be used
- * for iso transfers or for endpoint 14.  some endpoints are fully
- * configurable, with more generic names like "ep-a".  (remember that for
- * USB, "in" means "towards the USB master".)
- *
- * returns zero, or a negative error code.
- */
+               unsigned maxpacket_limit)
+{ }
 static inline int usb_ep_enable(struct usb_ep *ep)
-{
-       int ret;
-
-       if (ep->enabled)
-               return 0;
-
-       ret = ep->ops->enable(ep, ep->desc);
-       if (ret)
-               return ret;
-
-       ep->enabled = true;
-
-       return 0;
-}
-
-/**
- * usb_ep_disable - endpoint is no longer usable
- * @ep:the endpoint being unconfigured.  may not be the endpoint named "ep0".
- *
- * no other task may be using this endpoint when this is called.
- * any pending and uncompleted requests will complete with status
- * indicating disconnect (-ESHUTDOWN) before this call returns.
- * gadget drivers must call usb_ep_enable() again before queueing
- * requests to the endpoint.
- *
- * returns zero, or a negative error code.
- */
+{ return 0; }
 static inline int usb_ep_disable(struct usb_ep *ep)
-{
-       int ret;
-
-       if (!ep->enabled)
-               return 0;
-
-       ret = ep->ops->disable(ep);
-       if (ret)
-               return ret;
-
-       ep->enabled = false;
-
-       return 0;
-}
-
-/**
- * usb_ep_alloc_request - allocate a request object to use with this endpoint
- * @ep:the endpoint to be used with the request
- * @gfp_flags:GFP_* flags to use
- *
- * Request objects must be allocated with this call, since they normally
- * need controller-specific setup and may even need endpoint-specific
- * resources such as allocation of DMA descriptors.
- * Requests may be submitted with usb_ep_queue(), and receive a single
- * completion callback.  Free requests with usb_ep_free_request(), when
- * they are no longer needed.
- *
- * Returns the request, or null if one could not be allocated.
- */
+{ return 0; }
 static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep,
-                                                      gfp_t gfp_flags)
-{
-       return ep->ops->alloc_request(ep, gfp_flags);
-}
-
-/**
- * usb_ep_free_request - frees a request object
- * @ep:the endpoint associated with the request
- * @req:the request being freed
- *
- * Reverses the effect of usb_ep_alloc_request().
- * Caller guarantees the request is not queued, and that it will
- * no longer be requeued (or otherwise used).
- */
+               gfp_t gfp_flags)
+{ return NULL; }
 static inline void usb_ep_free_request(struct usb_ep *ep,
-                                      struct usb_request *req)
-{
-       ep->ops->free_request(ep, req);
-}
-
-/**
- * usb_ep_queue - queues (submits) an I/O request to an endpoint.
- * @ep:the endpoint associated with the request
- * @req:the request being submitted
- * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't
- *     pre-allocate all necessary memory with the request.
- *
- * This tells the device controller to perform the specified request through
- * that endpoint (reading or writing a buffer).  When the request completes,
- * including being canceled by usb_ep_dequeue(), the request's completion
- * routine is called to return the request to the driver.  Any endpoint
- * (except control endpoints like ep0) may have more than one transfer
- * request queued; they complete in FIFO order.  Once a gadget driver
- * submits a request, that request may not be examined or modified until it
- * is given back to that driver through the completion callback.
- *
- * Each request is turned into one or more packets.  The controller driver
- * never merges adjacent requests into the same packet.  OUT transfers
- * will sometimes use data that's already buffered in the hardware.
- * Drivers can rely on the fact that the first byte of the request's buffer
- * always corresponds to the first byte of some USB packet, for both
- * IN and OUT transfers.
- *
- * Bulk endpoints can queue any amount of data; the transfer is packetized
- * automatically.  The last packet will be short if the request doesn't fill it
- * out completely.  Zero length packets (ZLPs) should be avoided in portable
- * protocols since not all usb hardware can successfully handle zero length
- * packets.  (ZLPs may be explicitly written, and may be implicitly written if
- * the request 'zero' flag is set.)  Bulk endpoints may also be used
- * for interrupt transfers; but the reverse is not true, and some endpoints
- * won't support every interrupt transfer.  (Such as 768 byte packets.)
- *
- * Interrupt-only endpoints are less functional than bulk endpoints, for
- * example by not supporting queueing or not handling buffers that are
- * larger than the endpoint's maxpacket size.  They may also treat data
- * toggle differently.
- *
- * Control endpoints ... after getting a setup() callback, the driver queues
- * one response (even if it would be zero length).  That enables the
- * status ack, after transferring data as specified in the response.  Setup
- * functions may return negative error codes to generate protocol stalls.
- * (Note that some USB device controllers disallow protocol stall responses
- * in some cases.)  When control responses are deferred (the response is
- * written after the setup callback returns), then usb_ep_set_halt() may be
- * used on ep0 to trigger protocol stalls.  Depending on the controller,
- * it may not be possible to trigger a status-stage protocol stall when the
- * data stage is over, that is, from within the response's completion
- * routine.
- *
- * For periodic endpoints, like interrupt or isochronous ones, the usb host
- * arranges to poll once per interval, and the gadget driver usually will
- * have queued some data to transfer at that time.
- *
- * Returns zero, or a negative error code.  Endpoints that are not enabled
- * report errors; errors will also be
- * reported when the usb peripheral is disconnected.
- */
-static inline int usb_ep_queue(struct usb_ep *ep,
-                              struct usb_request *req, gfp_t gfp_flags)
-{
-       if (WARN_ON_ONCE(!ep->enabled && ep->address))
-               return -ESHUTDOWN;
-
-       return ep->ops->queue(ep, req, gfp_flags);
-}
-
-/**
- * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint
- * @ep:the endpoint associated with the request
- * @req:the request being canceled
- *
- * If the request is still active on the endpoint, it is dequeued and its
- * completion routine is called (with status -ECONNRESET); else a negative
- * error code is returned. This is guaranteed to happen before the call to
- * usb_ep_dequeue() returns.
- *
- * Note that some hardware can't clear out write fifos (to unlink the request
- * at the head of the queue) except as part of disconnecting from usb. Such
- * restrictions prevent drivers from supporting configuration changes,
- * even to configuration zero (a "chapter 9" requirement).
- */
+               struct usb_request *req)
+{ }
+static inline int usb_ep_queue(struct usb_ep *ep, struct usb_request *req,
+               gfp_t gfp_flags)
+{ return 0; }
 static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
-{
-       return ep->ops->dequeue(ep, req);
-}
-
-/**
- * usb_ep_set_halt - sets the endpoint halt feature.
- * @ep: the non-isochronous endpoint being stalled
- *
- * Use this to stall an endpoint, perhaps as an error report.
- * Except for control endpoints,
- * the endpoint stays halted (will not stream any data) until the host
- * clears this feature; drivers may need to empty the endpoint's request
- * queue first, to make sure no inappropriate transfers happen.
- *
- * Note that while an endpoint CLEAR_FEATURE will be invisible to the
- * gadget driver, a SET_INTERFACE will not be.  To reset endpoints for the
- * current altsetting, see usb_ep_clear_halt().  When switching altsettings,
- * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints.
- *
- * Returns zero, or a negative error code.  On success, this call sets
- * underlying hardware state that blocks data transfers.
- * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any
- * transfer requests are still queued, or if the controller hardware
- * (usually a FIFO) still holds bytes that the host hasn't collected.
- */
+{ return 0; }
 static inline int usb_ep_set_halt(struct usb_ep *ep)
-{
-       return ep->ops->set_halt(ep, 1);
-}
-
-/**
- * usb_ep_clear_halt - clears endpoint halt, and resets toggle
- * @ep:the bulk or interrupt endpoint being reset
- *
- * Use this when responding to the standard usb "set interface" request,
- * for endpoints that aren't reconfigured, after clearing any other state
- * in the endpoint's i/o queue.
- *
- * Returns zero, or a negative error code.  On success, this call clears
- * the underlying hardware state reflecting endpoint halt and data toggle.
- * Note that some hardware can't support this request (like pxa2xx_udc),
- * and accordingly can't correctly implement interface altsettings.
- */
+{ return 0; }
 static inline int usb_ep_clear_halt(struct usb_ep *ep)
-{
-       return ep->ops->set_halt(ep, 0);
-}
-
-/**
- * usb_ep_set_wedge - sets the halt feature and ignores clear requests
- * @ep: the endpoint being wedged
- *
- * Use this to stall an endpoint and ignore CLEAR_FEATURE(HALT_ENDPOINT)
- * requests. If the gadget driver clears the halt status, it will
- * automatically unwedge the endpoint.
- *
- * Returns zero on success, else negative errno.
- */
-static inline int
-usb_ep_set_wedge(struct usb_ep *ep)
-{
-       if (ep->ops->set_wedge)
-               return ep->ops->set_wedge(ep);
-       else
-               return ep->ops->set_halt(ep, 1);
-}
-
-/**
- * usb_ep_fifo_status - returns number of bytes in fifo, or error
- * @ep: the endpoint whose fifo status is being checked.
- *
- * FIFO endpoints may have "unclaimed data" in them in certain cases,
- * such as after aborted transfers.  Hosts may not have collected all
- * the IN data written by the gadget driver (and reported by a request
- * completion).  The gadget driver may not have collected all the data
- * written OUT to it by the host.  Drivers that need precise handling for
- * fault reporting or recovery may need to use this call.
- *
- * This returns the number of such bytes in the fifo, or a negative
- * errno if the endpoint doesn't use a FIFO or doesn't support such
- * precise handling.
- */
+{ return 0; }
+static inline int usb_ep_set_wedge(struct usb_ep *ep)
+{ return 0; }
 static inline int usb_ep_fifo_status(struct usb_ep *ep)
-{
-       if (ep->ops->fifo_status)
-               return ep->ops->fifo_status(ep);
-       else
-               return -EOPNOTSUPP;
-}
-
-/**
- * usb_ep_fifo_flush - flushes contents of a fifo
- * @ep: the endpoint whose fifo is being flushed.
- *
- * This call may be used to flush the "unclaimed data" that may exist in
- * an endpoint fifo after abnormal transaction terminations.  The call
- * must never be used except when endpoint is not being used for any
- * protocol translation.
- */
+{ return 0; }
 static inline void usb_ep_fifo_flush(struct usb_ep *ep)
-{
-       if (ep->ops->fifo_flush)
-               ep->ops->fifo_flush(ep);
-}
-
+{ }
+#endif /* USB_GADGET */
 
 /*-------------------------------------------------------------------------*/
 
@@ -582,6 +326,7 @@ struct usb_gadget_ops {
  * @dev: Driver model state for this abstract device.
  * @out_epnum: last used out ep number
  * @in_epnum: last used in ep number
+ * @mA: last set mA value
  * @otg_caps: OTG capabilities of this gadget.
  * @sg_supported: true if we can handle scatter-gather
  * @is_otg: True if the USB device port uses a Mini-AB jack, so that the
@@ -638,6 +383,7 @@ struct usb_gadget {
        struct device                   dev;
        unsigned                        out_epnum;
        unsigned                        in_epnum;
+       unsigned                        mA;
        struct usb_otg_caps             *otg_caps;
 
        unsigned                        sg_supported:1;
@@ -760,251 +506,44 @@ static inline int gadget_is_otg(struct usb_gadget *g)
 #endif
 }
 
-/**
- * usb_gadget_frame_number - returns the current frame number
- * @gadget: controller that reports the frame number
- *
- * Returns the usb frame number, normally eleven bits from a SOF packet,
- * or negative errno if this device doesn't support this capability.
- */
-static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
-{
-       return gadget->ops->get_frame(gadget);
-}
+/*-------------------------------------------------------------------------*/
 
-/**
- * usb_gadget_wakeup - tries to wake up the host connected to this gadget
- * @gadget: controller used to wake up the host
- *
- * Returns zero on success, else negative error code if the hardware
- * doesn't support such attempts, or its support has not been enabled
- * by the usb host.  Drivers must return device descriptors that report
- * their ability to support this, or hosts won't enable it.
- *
- * This may also try to use SRP to wake the host and start enumeration,
- * even if OTG isn't otherwise in use.  OTG devices may also start
- * remote wakeup even when hosts don't explicitly enable it.
- */
+#if IS_ENABLED(CONFIG_USB_GADGET)
+int usb_gadget_frame_number(struct usb_gadget *gadget);
+int usb_gadget_wakeup(struct usb_gadget *gadget);
+int usb_gadget_set_selfpowered(struct usb_gadget *gadget);
+int usb_gadget_clear_selfpowered(struct usb_gadget *gadget);
+int usb_gadget_vbus_connect(struct usb_gadget *gadget);
+int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA);
+int usb_gadget_vbus_disconnect(struct usb_gadget *gadget);
+int usb_gadget_connect(struct usb_gadget *gadget);
+int usb_gadget_disconnect(struct usb_gadget *gadget);
+int usb_gadget_deactivate(struct usb_gadget *gadget);
+int usb_gadget_activate(struct usb_gadget *gadget);
+#else
+static inline int usb_gadget_frame_number(struct usb_gadget *gadget)
+{ return 0; }
 static inline int usb_gadget_wakeup(struct usb_gadget *gadget)
-{
-       if (!gadget->ops->wakeup)
-               return -EOPNOTSUPP;
-       return gadget->ops->wakeup(gadget);
-}
-
-/**
- * usb_gadget_set_selfpowered - sets the device selfpowered feature.
- * @gadget:the device being declared as self-powered
- *
- * this affects the device status reported by the hardware driver
- * to reflect that it now has a local power supply.
- *
- * returns zero on success, else negative errno.
- */
+{ return 0; }
 static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget)
-{
-       if (!gadget->ops->set_selfpowered)
-               return -EOPNOTSUPP;
-       return gadget->ops->set_selfpowered(gadget, 1);
-}
-
-/**
- * usb_gadget_clear_selfpowered - clear the device selfpowered feature.
- * @gadget:the device being declared as bus-powered
- *
- * this affects the device status reported by the hardware driver.
- * some hardware may not support bus-powered operation, in which
- * case this feature's value can never change.
- *
- * returns zero on success, else negative errno.
- */
+{ return 0; }
 static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget)
-{
-       if (!gadget->ops->set_selfpowered)
-               return -EOPNOTSUPP;
-       return gadget->ops->set_selfpowered(gadget, 0);
-}
-
-/**
- * usb_gadget_vbus_connect - Notify controller that VBUS is powered
- * @gadget:The device which now has VBUS power.
- * Context: can sleep
- *
- * This call is used by a driver for an external transceiver (or GPIO)
- * that detects a VBUS power session starting.  Common responses include
- * resuming the controller, activating the D+ (or D-) pullup to let the
- * host detect that a USB device is attached, and starting to draw power
- * (8mA or possibly more, especially after SET_CONFIGURATION).
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
 static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget)
-{
-       if (!gadget->ops->vbus_session)
-               return -EOPNOTSUPP;
-       return gadget->ops->vbus_session(gadget, 1);
-}
-
-/**
- * usb_gadget_vbus_draw - constrain controller's VBUS power usage
- * @gadget:The device whose VBUS usage is being described
- * @mA:How much current to draw, in milliAmperes.  This should be twice
- *     the value listed in the configuration descriptor bMaxPower field.
- *
- * This call is used by gadget drivers during SET_CONFIGURATION calls,
- * reporting how much power the device may consume.  For example, this
- * could affect how quickly batteries are recharged.
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
 static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
-{
-       if (!gadget->ops->vbus_draw)
-               return -EOPNOTSUPP;
-       return gadget->ops->vbus_draw(gadget, mA);
-}
-
-/**
- * usb_gadget_vbus_disconnect - notify controller about VBUS session end
- * @gadget:the device whose VBUS supply is being described
- * Context: can sleep
- *
- * This call is used by a driver for an external transceiver (or GPIO)
- * that detects a VBUS power session ending.  Common responses include
- * reversing everything done in usb_gadget_vbus_connect().
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
 static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
-{
-       if (!gadget->ops->vbus_session)
-               return -EOPNOTSUPP;
-       return gadget->ops->vbus_session(gadget, 0);
-}
-
-/**
- * usb_gadget_connect - software-controlled connect to USB host
- * @gadget:the peripheral being connected
- *
- * Enables the D+ (or potentially D-) pullup.  The host will start
- * enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered).  This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
 static inline int usb_gadget_connect(struct usb_gadget *gadget)
-{
-       int ret;
-
-       if (!gadget->ops->pullup)
-               return -EOPNOTSUPP;
-
-       if (gadget->deactivated) {
-               /*
-                * If gadget is deactivated we only save new state.
-                * Gadget will be connected automatically after activation.
-                */
-               gadget->connected = true;
-               return 0;
-       }
-
-       ret = gadget->ops->pullup(gadget, 1);
-       if (!ret)
-               gadget->connected = 1;
-       return ret;
-}
-
-/**
- * usb_gadget_disconnect - software-controlled disconnect from USB host
- * @gadget:the peripheral being disconnected
- *
- * Disables the D+ (or potentially D-) pullup, which the host may see
- * as a disconnect (when a VBUS session is active).  Not all systems
- * support software pullup controls.
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
 static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
-{
-       int ret;
-
-       if (!gadget->ops->pullup)
-               return -EOPNOTSUPP;
-
-       if (gadget->deactivated) {
-               /*
-                * If gadget is deactivated we only save new state.
-                * Gadget will stay disconnected after activation.
-                */
-               gadget->connected = false;
-               return 0;
-       }
-
-       ret = gadget->ops->pullup(gadget, 0);
-       if (!ret)
-               gadget->connected = 0;
-       return ret;
-}
-
-/**
- * usb_gadget_deactivate - deactivate function which is not ready to work
- * @gadget: the peripheral being deactivated
- *
- * This routine may be used during the gadget driver bind() call to prevent
- * the peripheral from ever being visible to the USB host, unless later
- * usb_gadget_activate() is called.  For example, user mode components may
- * need to be activated before the system can talk to hosts.
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
 static inline int usb_gadget_deactivate(struct usb_gadget *gadget)
-{
-       int ret;
-
-       if (gadget->deactivated)
-               return 0;
-
-       if (gadget->connected) {
-               ret = usb_gadget_disconnect(gadget);
-               if (ret)
-                       return ret;
-               /*
-                * If gadget was being connected before deactivation, we want
-                * to reconnect it in usb_gadget_activate().
-                */
-               gadget->connected = true;
-       }
-       gadget->deactivated = true;
-
-       return 0;
-}
-
-/**
- * usb_gadget_activate - activate function which is not ready to work
- * @gadget: the peripheral being activated
- *
- * This routine activates gadget which was previously deactivated with
- * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed.
- *
- * Returns zero on success, else negative errno.
- */
+{ return 0; }
 static inline int usb_gadget_activate(struct usb_gadget *gadget)
-{
-       if (!gadget->deactivated)
-               return 0;
-
-       gadget->deactivated = false;
-
-       /*
-        * If gadget has been connected before deactivation, or became connected
-        * while it was being deactivated, we call usb_gadget_connect().
-        */
-       if (gadget->connected)
-               return usb_gadget_connect(gadget);
-
-       return 0;
-}
+{ return 0; }
+#endif /* CONFIG_USB_GADGET */
 
 /*-------------------------------------------------------------------------*/
 
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
deleted file mode 100644 (file)
index 8c8f685..0000000
+++ /dev/null
@@ -1,200 +0,0 @@
-/* linux/include/asm-arm/arch-msm/hsusb.h
- *
- * Copyright (C) 2008 Google, Inc.
- * Author: Brian Swetland <swetland@google.com>
- * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef __ASM_ARCH_MSM_HSUSB_H
-#define __ASM_ARCH_MSM_HSUSB_H
-
-#include <linux/extcon.h>
-#include <linux/types.h>
-#include <linux/usb/otg.h>
-#include <linux/clk.h>
-
-/**
- * OTG control
- *
- * OTG_NO_CONTROL      Id/VBUS notifications not required. Useful in host
- *                      only configuration.
- * OTG_PHY_CONTROL     Id/VBUS notifications comes from USB PHY.
- * OTG_PMIC_CONTROL    Id/VBUS notifications comes from PMIC hardware.
- * OTG_USER_CONTROL    Id/VBUS notifications comes from User via sysfs.
- *
- */
-enum otg_control_type {
-       OTG_NO_CONTROL = 0,
-       OTG_PHY_CONTROL,
-       OTG_PMIC_CONTROL,
-       OTG_USER_CONTROL,
-};
-
-/**
- * PHY used in
- *
- * INVALID_PHY                 Unsupported PHY
- * CI_45NM_INTEGRATED_PHY      Chipidea 45nm integrated PHY
- * SNPS_28NM_INTEGRATED_PHY    Synopsys 28nm integrated PHY
- *
- */
-enum msm_usb_phy_type {
-       INVALID_PHY = 0,
-       CI_45NM_INTEGRATED_PHY,
-       SNPS_28NM_INTEGRATED_PHY,
-};
-
-#define IDEV_CHG_MAX   1500
-#define IUNIT          100
-
-/**
- * Different states involved in USB charger detection.
- *
- * USB_CHG_STATE_UNDEFINED     USB charger is not connected or detection
- *                              process is not yet started.
- * USB_CHG_STATE_WAIT_FOR_DCD  Waiting for Data pins contact.
- * USB_CHG_STATE_DCD_DONE      Data pin contact is detected.
- * USB_CHG_STATE_PRIMARY_DONE  Primary detection is completed (Detects
- *                              between SDP and DCP/CDP).
- * USB_CHG_STATE_SECONDARY_DONE        Secondary detection is completed (Detects
- *                              between DCP and CDP).
- * USB_CHG_STATE_DETECTED      USB charger type is determined.
- *
- */
-enum usb_chg_state {
-       USB_CHG_STATE_UNDEFINED = 0,
-       USB_CHG_STATE_WAIT_FOR_DCD,
-       USB_CHG_STATE_DCD_DONE,
-       USB_CHG_STATE_PRIMARY_DONE,
-       USB_CHG_STATE_SECONDARY_DONE,
-       USB_CHG_STATE_DETECTED,
-};
-
-/**
- * USB charger types
- *
- * USB_INVALID_CHARGER Invalid USB charger.
- * USB_SDP_CHARGER     Standard downstream port. Refers to a downstream port
- *                      on USB2.0 compliant host/hub.
- * USB_DCP_CHARGER     Dedicated charger port (AC charger/ Wall charger).
- * USB_CDP_CHARGER     Charging downstream port. Enumeration can happen and
- *                      IDEV_CHG_MAX can be drawn irrespective of USB state.
- *
- */
-enum usb_chg_type {
-       USB_INVALID_CHARGER = 0,
-       USB_SDP_CHARGER,
-       USB_DCP_CHARGER,
-       USB_CDP_CHARGER,
-};
-
-/**
- * struct msm_otg_platform_data - platform device data
- *              for msm_otg driver.
- * @phy_init_seq: PHY configuration sequence values. Value of -1 is reserved as
- *              "do not overwrite default value at this address".
- * @phy_init_sz: PHY configuration sequence size.
- * @vbus_power: VBUS power on/off routine.
- * @power_budget: VBUS power budget in mA (0 will be treated as 500mA).
- * @mode: Supported mode (OTG/peripheral/host).
- * @otg_control: OTG switch controlled by user/Id pin
- */
-struct msm_otg_platform_data {
-       int *phy_init_seq;
-       int phy_init_sz;
-       void (*vbus_power)(bool on);
-       unsigned power_budget;
-       enum usb_dr_mode mode;
-       enum otg_control_type otg_control;
-       enum msm_usb_phy_type phy_type;
-       void (*setup_gpio)(enum usb_otg_state state);
-};
-
-/**
- * struct msm_usb_cable - structure for external connector cable
- *                       state tracking
- * @nb: hold event notification callback
- * @conn: used for notification registration
- */
-struct msm_usb_cable {
-       struct notifier_block           nb;
-       struct extcon_dev               *extcon;
-};
-
-/**
- * struct msm_otg: OTG driver data. Shared by HCD and DCD.
- * @otg: USB OTG Transceiver structure.
- * @pdata: otg device platform data.
- * @irq: IRQ number assigned for HSUSB controller.
- * @clk: clock struct of usb_hs_clk.
- * @pclk: clock struct of usb_hs_pclk.
- * @core_clk: clock struct of usb_hs_core_clk.
- * @regs: ioremapped register base address.
- * @inputs: OTG state machine inputs(Id, SessValid etc).
- * @sm_work: OTG state machine work.
- * @in_lpm: indicates low power mode (LPM) state.
- * @async_int: Async interrupt arrived.
- * @cur_power: The amount of mA available from downstream port.
- * @chg_work: Charger detection work.
- * @chg_state: The state of charger detection process.
- * @chg_type: The type of charger attached.
- * @dcd_retries: The retry count used to track Data contact
- *               detection process.
- * @manual_pullup: true if VBUS is not routed to USB controller/phy
- *     and controller driver therefore enables pull-up explicitly before
- *     starting controller using usbcmd run/stop bit.
- * @vbus: VBUS signal state tracking, using extcon framework
- * @id: ID signal state tracking, using extcon framework
- * @switch_gpio: Descriptor for GPIO used to control external Dual
- *               SPDT USB Switch.
- * @reboot: Used to inform the driver to route USB D+/D- line to Device
- *         connector
- */
-struct msm_otg {
-       struct usb_phy phy;
-       struct msm_otg_platform_data *pdata;
-       int irq;
-       struct clk *clk;
-       struct clk *pclk;
-       struct clk *core_clk;
-       void __iomem *regs;
-#define ID             0
-#define B_SESS_VLD     1
-       unsigned long inputs;
-       struct work_struct sm_work;
-       atomic_t in_lpm;
-       int async_int;
-       unsigned cur_power;
-       int phy_number;
-       struct delayed_work chg_work;
-       enum usb_chg_state chg_state;
-       enum usb_chg_type chg_type;
-       u8 dcd_retries;
-       struct regulator *v3p3;
-       struct regulator *v1p8;
-       struct regulator *vddcx;
-
-       struct reset_control *phy_rst;
-       struct reset_control *link_rst;
-       int vdd_levels[3];
-
-       bool manual_pullup;
-
-       struct msm_usb_cable vbus;
-       struct msm_usb_cable id;
-
-       struct gpio_desc *switch_gpio;
-       struct notifier_block reboot;
-};
-
-#endif
index de3237fce6b2d82e9a1358724a32b9a7fcc91b87..5ff9032ee1b47b8cb8a7b969e93829ce63d1deb1 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/usb/phy.h>
 
 #if IS_ENABLED(CONFIG_OF)
-enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *phy_np);
+enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0);
 bool of_usb_host_tpl_support(struct device_node *np);
 int of_usb_update_otg_caps(struct device_node *np,
                        struct usb_otg_caps *otg_caps);
@@ -20,7 +20,7 @@ struct device_node *usb_of_get_child_node(struct device_node *parent,
                        int portnum);
 #else
 static inline enum usb_dr_mode
-of_usb_get_dr_mode_by_phy(struct device_node *phy_np)
+of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
 {
        return USB_DR_MODE_UNKNOWN;
 }
diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h
deleted file mode 100644 (file)
index 376654b..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- */
-
-#ifndef __USB_CORE_XHCI_PDRIVER_H
-#define __USB_CORE_XHCI_PDRIVER_H
-
-/**
- * struct usb_xhci_pdata - platform_data for generic xhci platform driver
- *
- * @usb3_lpm_capable:  determines if this xhci platform supports USB3
- *                     LPM capability
- *
- */
-struct usb_xhci_pdata {
-       unsigned        usb3_lpm_capable:1;
-};
-
-#endif /* __USB_CORE_XHCI_PDRIVER_H */
index 8d7634247fb4e884b65b1236a858ac8cd62477f0..6abd24f258bc1100e97f989714903c404ea75c42 100644 (file)
@@ -45,7 +45,7 @@ void poke_blanked_console(void);
 int con_font_op(struct vc_data *vc, struct console_font_op *op);
 int con_set_cmap(unsigned char __user *cmap);
 int con_get_cmap(unsigned char __user *cmap);
-void scrollback(struct vc_data *vc, int lines);
+void scrollback(struct vc_data *vc);
 void scrollfront(struct vc_data *vc, int lines);
 void clear_buffer_attributes(struct vc_data *vc);
 void update_region(struct vc_data *vc, unsigned long start, int count);
@@ -59,14 +59,13 @@ int tioclinux(struct tty_struct *tty, unsigned long arg);
 #ifdef CONFIG_CONSOLE_TRANSLATIONS
 /* consolemap.c */
 
-struct unimapinit;
 struct unipair;
 
 int con_set_trans_old(unsigned char __user * table);
 int con_get_trans_old(unsigned char __user * table);
 int con_set_trans_new(unsigned short __user * table);
 int con_get_trans_new(unsigned short __user * table);
-int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui);
+int con_clear_unimap(struct vc_data *vc);
 int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list);
 int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, struct unipair __user *list);
 int con_set_default_unimap(struct vc_data *vc);
@@ -92,7 +91,7 @@ static inline int con_get_trans_new(unsigned short __user *table)
 {
        return -EINVAL;
 }
-static inline int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui)
+static inline int con_clear_unimap(struct vc_data *vc)
 {
        return 0;
 }
index fa2196990f84b2db9d77883b0bb7d7f329d0cc8e..aa9bfea8804a0c74d2d6e0be1bac5eb4f3f119c0 100644 (file)
@@ -12,11 +12,9 @@ struct task_struct;
 /*
  * vtime_accounting_cpu_enabled() definitions/declarations
  */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
 static inline bool vtime_accounting_cpu_enabled(void) { return true; }
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
 /*
  * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
  * in that case and compute the tickless cputime.
@@ -37,11 +35,9 @@ static inline bool vtime_accounting_cpu_enabled(void)
 
        return false;
 }
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 static inline bool vtime_accounting_cpu_enabled(void) { return false; }
-#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+#endif
 
 
 /*
@@ -64,35 +60,15 @@ extern void vtime_account_system(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
 extern void vtime_account_user(struct task_struct *tsk);
 
-#ifdef __ARCH_HAS_VTIME_ACCOUNT
-extern void vtime_account_irq_enter(struct task_struct *tsk);
-#else
-extern void vtime_common_account_irq_enter(struct task_struct *tsk);
-static inline void vtime_account_irq_enter(struct task_struct *tsk)
-{
-       if (vtime_accounting_cpu_enabled())
-               vtime_common_account_irq_enter(tsk);
-}
-#endif /* __ARCH_HAS_VTIME_ACCOUNT */
-
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
 static inline void vtime_account_user(struct task_struct *tsk) { }
-static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void arch_vtime_task_switch(struct task_struct *tsk);
-extern void vtime_gen_account_irq_exit(struct task_struct *tsk);
-
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
-{
-       if (vtime_accounting_cpu_enabled())
-               vtime_gen_account_irq_exit(tsk);
-}
-
 extern void vtime_user_enter(struct task_struct *tsk);
 
 static inline void vtime_user_exit(struct task_struct *tsk)
@@ -103,11 +79,6 @@ extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
 extern void vtime_init_idle(struct task_struct *tsk, int cpu);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN  */
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
-{
-       /* On hard|softirq exit we always account to hard|softirq cputime */
-       vtime_account_system(tsk);
-}
 static inline void vtime_user_enter(struct task_struct *tsk) { }
 static inline void vtime_user_exit(struct task_struct *tsk) { }
 static inline void vtime_guest_enter(struct task_struct *tsk) { }
@@ -115,6 +86,19 @@ static inline void vtime_guest_exit(struct task_struct *tsk) { }
 static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
 #endif
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+extern void vtime_account_irq_enter(struct task_struct *tsk);
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+       /* On hard|softirq exit we always account to hard|softirq cputime */
+       vtime_account_system(tsk);
+}
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
+static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
+#endif
+
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 extern void irqtime_account_irq(struct task_struct *tsk);
 #else
index 791800ddd6d90de23d7b7acea7a91e500e646eda..6360c259da6d62cd3c4b99c858acd6db19eff401 100644 (file)
@@ -34,6 +34,9 @@
 
 #define BOND_DEFAULT_MIIMON    100
 
+#ifndef __long_aligned
+#define __long_aligned __attribute__((aligned((sizeof(long)))))
+#endif
 /*
  * Less bad way to call ioctl from within the kernel; this needs to be
  * done some other way to get the call out of interrupt context.
@@ -138,7 +141,9 @@ struct bond_params {
        struct reciprocal_value reciprocal_packets_per_slave;
        u16 ad_actor_sys_prio;
        u16 ad_user_port_key;
-       u8 ad_actor_system[ETH_ALEN];
+
+       /* 2 bytes of padding : see ether_addr_equal_64bits() */
+       u8 ad_actor_system[ETH_ALEN + 2];
 };
 
 struct bond_parm_tbl {
index 37165fba3741ac68e5a93a8a22473eae70361e45..08f36cd2b874b5493bc9b51c4071722591dcc321 100644 (file)
@@ -313,10 +313,9 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
        return min(dst->dev->mtu, IP_MAX_MTU);
 }
 
-static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
+static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
+                                         const struct sk_buff *skb)
 {
-       struct sock *sk = skb->sk;
-
        if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
                bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
 
index dd78bea227c8b0baf2fd14eb878f26f265ac76e0..b6083c34ef0d4ed0d03f9c030307f335d3a4cba0 100644 (file)
@@ -284,6 +284,14 @@ static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
        return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
 }
 
+/* jiffies until ct expires, 0 if already expired */
+static inline unsigned long nf_ct_expires(const struct nf_conn *ct)
+{
+       long timeout = (long)ct->timeout.expires - (long)jiffies;
+
+       return timeout > 0 ? timeout : 0;
+}
+
 struct kernel_param;
 
 int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
index 649d2a8c17fc36f04b4d317c41c33edd6cdfc8b7..ff5be7e8ddeae6f9d2f9eac889d7abbfbd396bbd 100644 (file)
@@ -1576,7 +1576,13 @@ static inline void sock_put(struct sock *sk)
  */
 void sock_gen_put(struct sock *sk);
 
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested,
+                    unsigned int trim_cap);
+static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+                                const int nested)
+{
+       return __sk_receive_skb(sk, skb, nested, 1);
+}
 
 static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
 {
index 985619a593230a6532017e4f61dfaa945b04a08f..1d8e158241da742eb845ac30bab4292cec33d479 100644 (file)
@@ -60,7 +60,7 @@ struct switchdev_attr {
                struct netdev_phys_item_id ppid;        /* PORT_PARENT_ID */
                u8 stp_state;                           /* PORT_STP_STATE */
                unsigned long brport_flags;             /* PORT_BRIDGE_FLAGS */
-               u32 ageing_time;                        /* BRIDGE_AGEING_TIME */
+               clock_t ageing_time;                    /* BRIDGE_AGEING_TIME */
                bool vlan_filtering;                    /* BRIDGE_VLAN_FILTERING */
        } u;
 };
index 8bdae34d1f9add25d968182c6540c431d994c7ce..ec10cfef166afb3a7cdc777ade3903e076d30376 100644 (file)
@@ -245,6 +245,7 @@ endif
 header-y += hw_breakpoint.h
 header-y += l2tp.h
 header-y += libc-compat.h
+header-y += lirc.h
 header-y += limits.h
 header-y += llc.h
 header-y += loop.h
index b0916fc72cce733e760fb5d92b3f510c1cd25514..22e5e589a2747beff6d59f40d44ce99364c85415 100644 (file)
@@ -39,6 +39,7 @@ enum iio_chan_type {
        IIO_RESISTANCE,
        IIO_PH,
        IIO_UVINDEX,
+       IIO_ELECTRICALCONDUCTIVITY,
 };
 
 enum iio_modifier {
index 737fa32faad4b2d6c3639e5b666da7ac039e2d38..d6d071fc3c568249bf70f24602f95770eb1643df 100644 (file)
 #define SW_ROTATE_LOCK         0x0c  /* set = rotate locked/disabled */
 #define SW_LINEIN_INSERT       0x0d  /* set = inserted */
 #define SW_MUTE_DEVICE         0x0e  /* set = device disabled */
+#define SW_PEN_INSERTED                0x0f  /* set = pen inserted */
 #define SW_MAX                 0x0f
 #define SW_CNT                 (SW_MAX+1)
 
index 36ce552cf6a928b7c045db60c3eacdf8e0ae8204..c66a485a24ac81e324ea53ea17b405a3c73b520a 100644 (file)
@@ -276,6 +276,9 @@ enum perf_event_read_format {
 
 /*
  * Hardware event_id to monitor via a performance monitoring event:
+ *
+ * @sample_max_stack: Max number of frame pointers in a callchain,
+ *                   should be < /proc/sys/kernel/perf_event_max_stack
  */
 struct perf_event_attr {
 
@@ -385,7 +388,8 @@ struct perf_event_attr {
         * Wakeup watermark for AUX area
         */
        __u32   aux_watermark;
-       __u32   __reserved_2;   /* align to __u64 */
+       __u16   sample_max_stack;
+       __u16   __reserved_2;   /* align to __u64 */
 };
 
 #define perf_flags(attr)       (*(&(attr)->read_format + 1))
index f755a602d4a176e006dc2cb5830d1c4812bac81f..557bdf10cd44b3ecf9601b33eee4aabca031332d 100644 (file)
@@ -375,9 +375,11 @@ config VIRT_CPU_ACCOUNTING_GEN
 
          If unsure, say N.
 
+endchoice
+
 config IRQ_TIME_ACCOUNTING
        bool "Fine granularity task level IRQ time accounting"
-       depends on HAVE_IRQ_TIME_ACCOUNTING && !NO_HZ_FULL
+       depends on HAVE_IRQ_TIME_ACCOUNTING && !VIRT_CPU_ACCOUNTING_NATIVE
        help
          Select this option to enable fine granularity task irq time
          accounting. This is done by reading a timestamp on each
@@ -386,8 +388,6 @@ config IRQ_TIME_ACCOUNTING
 
          If in doubt, say N here.
 
-endchoice
-
 config BSD_PROCESS_ACCT
        bool "BSD Process Accounting"
        depends on MULTIUSER
@@ -517,6 +517,7 @@ config SRCU
 config TASKS_RCU
        bool
        default n
+       depends on !UML
        select SRCU
        help
          This option enables a task-based RCU implementation that uses
@@ -1458,6 +1459,7 @@ config KALLSYMS_ALL
 
 config KALLSYMS_ABSOLUTE_PERCPU
        bool
+       depends on KALLSYMS
        default X86_64 && SMP
 
 config KALLSYMS_BASE_RELATIVE
index b3757ea0694be8a993d7e60d6c2fe90c9dbe90dc..ae72b3cddc8d866450314466df89d466ee7f2dd2 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -259,16 +259,6 @@ static void sem_rcu_free(struct rcu_head *head)
        ipc_rcu_free(head);
 }
 
-/*
- * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
- * are only control barriers.
- * The code must pair with spin_unlock(&sem->lock) or
- * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
- *
- * smp_rmb() is sufficient, as writes cannot pass the control barrier.
- */
-#define ipc_smp_acquire__after_spin_is_unlocked()      smp_rmb()
-
 /*
  * Wait until all currently ongoing simple ops have completed.
  * Caller must own sem_perm.lock.
@@ -292,7 +282,6 @@ static void sem_wait_array(struct sem_array *sma)
                sem = sma->sem_base + i;
                spin_unlock_wait(&sem->lock);
        }
-       ipc_smp_acquire__after_spin_is_unlocked();
 }
 
 /*
@@ -350,7 +339,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
                         *      complex_count++;
                         *      spin_unlock(sem_perm.lock);
                         */
-                       ipc_smp_acquire__after_spin_is_unlocked();
+                       smp_acquire__after_ctrl_dep();
 
                        /*
                         * Now repeat the test of complex_count:
index 080a2dfb58004c06de7320c05e3404baab46562e..bf4495fcd25d6775f9144564fe16a7fda95a1c67 100644 (file)
@@ -99,7 +99,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
        if (err)
                goto free_smap;
 
-       err = get_callchain_buffers();
+       err = get_callchain_buffers(sysctl_perf_event_max_stack);
        if (err)
                goto free_smap;
 
index d948e44c471ea89aa2953f7a687c6b5783bbdf0e..7b61887f7ccdf57fdcb3083574a56c5580110d73 100644 (file)
@@ -1201,6 +1201,8 @@ static struct cpuhp_step cpuhp_bp_states[] = {
                .teardown               = takedown_cpu,
                .cant_stop              = true,
        },
+#else
+       [CPUHP_BRINGUP_CPU] = { },
 #endif
 };
 
index 179ef46409646fd98ed9528ebd92786ae63dfd71..e9fdb5203de5c0b99bfb992640e89ffe9d4a8160 100644 (file)
@@ -104,7 +104,7 @@ fail:
        return -ENOMEM;
 }
 
-int get_callchain_buffers(void)
+int get_callchain_buffers(int event_max_stack)
 {
        int err = 0;
        int count;
@@ -121,6 +121,15 @@ int get_callchain_buffers(void)
                /* If the allocation failed, give up */
                if (!callchain_cpus_entries)
                        err = -ENOMEM;
+               /*
+                * If requesting per event more than the global cap,
+                * return a different error to help userspace figure
+                * this out.
+                *
+                * And also do it here so that we have &callchain_mutex held.
+                */
+               if (event_max_stack > sysctl_perf_event_max_stack)
+                       err = -EOVERFLOW;
                goto exit;
        }
 
@@ -174,11 +183,12 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
        bool user   = !event->attr.exclude_callchain_user;
        /* Disallow cross-task user callchains. */
        bool crosstask = event->ctx->task && event->ctx->task != current;
+       const u32 max_stack = event->attr.sample_max_stack;
 
        if (!kernel && !user)
                return NULL;
 
-       return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
+       return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
 }
 
 struct perf_callchain_entry *
index 85cd41878a7420a790efa31fd1efee98eb1c8b62..79dae188a98760c0085f2930ecf79f137abe43ce 100644 (file)
@@ -335,6 +335,7 @@ static atomic_t perf_sched_count;
 
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
+static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
 
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -396,6 +397,13 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
        if (ret || !write)
                return ret;
 
+       /*
+        * If throttling is disabled don't allow the write:
+        */
+       if (sysctl_perf_cpu_time_max_percent == 100 ||
+           sysctl_perf_cpu_time_max_percent == 0)
+               return -EINVAL;
+
        max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
        perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
        update_perf_cpu_limits();
@@ -1678,12 +1686,33 @@ static bool is_orphaned_event(struct perf_event *event)
        return event->state == PERF_EVENT_STATE_DEAD;
 }
 
-static inline int pmu_filter_match(struct perf_event *event)
+static inline int __pmu_filter_match(struct perf_event *event)
 {
        struct pmu *pmu = event->pmu;
        return pmu->filter_match ? pmu->filter_match(event) : 1;
 }
 
+/*
+ * Check whether we should attempt to schedule an event group based on
+ * PMU-specific filtering. An event group can consist of HW and SW events,
+ * potentially with a SW leader, so we must check all the filters, to
+ * determine whether a group is schedulable:
+ */
+static inline int pmu_filter_match(struct perf_event *event)
+{
+       struct perf_event *child;
+
+       if (!__pmu_filter_match(event))
+               return 0;
+
+       list_for_each_entry(child, &event->sibling_list, group_entry) {
+               if (!__pmu_filter_match(child))
+                       return 0;
+       }
+
+       return 1;
+}
+
 static inline int
 event_filter_match(struct perf_event *event)
 {
@@ -3665,6 +3694,39 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_attach(struct perf_event *event,
                               struct ring_buffer *rb);
 
+static void detach_sb_event(struct perf_event *event)
+{
+       struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
+
+       raw_spin_lock(&pel->lock);
+       list_del_rcu(&event->sb_list);
+       raw_spin_unlock(&pel->lock);
+}
+
+static bool is_sb_event(struct perf_event *event)
+{
+       struct perf_event_attr *attr = &event->attr;
+
+       if (event->parent)
+               return false;
+
+       if (event->attach_state & PERF_ATTACH_TASK)
+               return false;
+
+       if (attr->mmap || attr->mmap_data || attr->mmap2 ||
+           attr->comm || attr->comm_exec ||
+           attr->task ||
+           attr->context_switch)
+               return true;
+       return false;
+}
+
+static void unaccount_pmu_sb_event(struct perf_event *event)
+{
+       if (is_sb_event(event))
+               detach_sb_event(event);
+}
+
 static void unaccount_event_cpu(struct perf_event *event, int cpu)
 {
        if (event->parent)
@@ -3728,6 +3790,8 @@ static void unaccount_event(struct perf_event *event)
        }
 
        unaccount_event_cpu(event, event->cpu);
+
+       unaccount_pmu_sb_event(event);
 }
 
 static void perf_sched_delayed(struct work_struct *work)
@@ -5854,11 +5918,11 @@ perf_event_read_event(struct perf_event *event,
        perf_output_end(&handle);
 }
 
-typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
+typedef void (perf_iterate_f)(struct perf_event *event, void *data);
 
 static void
-perf_event_aux_ctx(struct perf_event_context *ctx,
-                  perf_event_aux_output_cb output,
+perf_iterate_ctx(struct perf_event_context *ctx,
+                  perf_iterate_f output,
                   void *data, bool all)
 {
        struct perf_event *event;
@@ -5875,52 +5939,55 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
        }
 }
 
-static void
-perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
-                       struct perf_event_context *task_ctx)
+static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
 {
-       rcu_read_lock();
-       preempt_disable();
-       perf_event_aux_ctx(task_ctx, output, data, false);
-       preempt_enable();
-       rcu_read_unlock();
+       struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
+       struct perf_event *event;
+
+       list_for_each_entry_rcu(event, &pel->list, sb_list) {
+               if (event->state < PERF_EVENT_STATE_INACTIVE)
+                       continue;
+               if (!event_filter_match(event))
+                       continue;
+               output(event, data);
+       }
 }
 
+/*
+ * Iterate all events that need to receive side-band events.
+ *
+ * For new callers; ensure that account_pmu_sb_event() includes
+ * your event, otherwise it might not get delivered.
+ */
 static void
-perf_event_aux(perf_event_aux_output_cb output, void *data,
+perf_iterate_sb(perf_iterate_f output, void *data,
               struct perf_event_context *task_ctx)
 {
-       struct perf_cpu_context *cpuctx;
        struct perf_event_context *ctx;
-       struct pmu *pmu;
        int ctxn;
 
+       rcu_read_lock();
+       preempt_disable();
+
        /*
-        * If we have task_ctx != NULL we only notify
-        * the task context itself. The task_ctx is set
-        * only for EXIT events before releasing task
+        * If we have task_ctx != NULL we only notify the task context itself.
+        * The task_ctx is set only for EXIT events before releasing task
         * context.
         */
        if (task_ctx) {
-               perf_event_aux_task_ctx(output, data, task_ctx);
-               return;
+               perf_iterate_ctx(task_ctx, output, data, false);
+               goto done;
        }
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-               if (cpuctx->unique_pmu != pmu)
-                       goto next;
-               perf_event_aux_ctx(&cpuctx->ctx, output, data, false);
-               ctxn = pmu->task_ctx_nr;
-               if (ctxn < 0)
-                       goto next;
+       perf_iterate_sb_cpu(output, data);
+
+       for_each_task_context_nr(ctxn) {
                ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
                if (ctx)
-                       perf_event_aux_ctx(ctx, output, data, false);
-next:
-               put_cpu_ptr(pmu->pmu_cpu_context);
+                       perf_iterate_ctx(ctx, output, data, false);
        }
+done:
+       preempt_enable();
        rcu_read_unlock();
 }
 
@@ -5969,7 +6036,7 @@ void perf_event_exec(void)
 
                perf_event_enable_on_exec(ctxn);
 
-               perf_event_aux_ctx(ctx, perf_event_addr_filters_exec, NULL,
+               perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
                                   true);
        }
        rcu_read_unlock();
@@ -6013,9 +6080,9 @@ static int __perf_pmu_output_stop(void *info)
        };
 
        rcu_read_lock();
-       perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
+       perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
        if (cpuctx->task_ctx)
-               perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop,
+               perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
                                   &ro, false);
        rcu_read_unlock();
 
@@ -6144,7 +6211,7 @@ static void perf_event_task(struct task_struct *task,
                },
        };
 
-       perf_event_aux(perf_event_task_output,
+       perf_iterate_sb(perf_event_task_output,
                       &task_event,
                       task_ctx);
 }
@@ -6223,7 +6290,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 
        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
 
-       perf_event_aux(perf_event_comm_output,
+       perf_iterate_sb(perf_event_comm_output,
                       comm_event,
                       NULL);
 }
@@ -6454,7 +6521,7 @@ got_name:
 
        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
 
-       perf_event_aux(perf_event_mmap_output,
+       perf_iterate_sb(perf_event_mmap_output,
                       mmap_event,
                       NULL);
 
@@ -6537,7 +6604,7 @@ static void perf_addr_filters_adjust(struct vm_area_struct *vma)
                if (!ctx)
                        continue;
 
-               perf_event_aux_ctx(ctx, __perf_addr_filters_adjust, vma, true);
+               perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
        }
        rcu_read_unlock();
 }
@@ -6724,7 +6791,7 @@ static void perf_event_switch(struct task_struct *task,
                },
        };
 
-       perf_event_aux(perf_event_switch_output,
+       perf_iterate_sb(perf_event_switch_output,
                       &switch_event,
                       NULL);
 }
@@ -8646,6 +8713,28 @@ unlock:
        return pmu;
 }
 
+static void attach_sb_event(struct perf_event *event)
+{
+       struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
+
+       raw_spin_lock(&pel->lock);
+       list_add_rcu(&event->sb_list, &pel->list);
+       raw_spin_unlock(&pel->lock);
+}
+
+/*
+ * We keep a list of all !task (and therefore per-cpu) events
+ * that need to receive side-band records.
+ *
+ * This avoids having to scan all the various PMU per-cpu contexts
+ * looking for them.
+ */
+static void account_pmu_sb_event(struct perf_event *event)
+{
+       if (is_sb_event(event))
+               attach_sb_event(event);
+}
+
 static void account_event_cpu(struct perf_event *event, int cpu)
 {
        if (event->parent)
@@ -8726,6 +8815,8 @@ static void account_event(struct perf_event *event)
 enabled:
 
        account_event_cpu(event, event->cpu);
+
+       account_pmu_sb_event(event);
 }
 
 /*
@@ -8874,7 +8965,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        if (!event->parent) {
                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
-                       err = get_callchain_buffers();
+                       err = get_callchain_buffers(attr->sample_max_stack);
                        if (err)
                                goto err_addr_filters;
                }
@@ -9196,6 +9287,9 @@ SYSCALL_DEFINE5(perf_event_open,
                        return -EINVAL;
        }
 
+       if (!attr.sample_max_stack)
+               attr.sample_max_stack = sysctl_perf_event_max_stack;
+
        /*
         * In cgroup mode, the pid argument is used to pass the fd
         * opened to the cgroup directory in cgroupfs. The cpu argument
@@ -9269,7 +9363,7 @@ SYSCALL_DEFINE5(perf_event_open,
 
        if (is_sampling_event(event)) {
                if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
-                       err = -ENOTSUPP;
+                       err = -EOPNOTSUPP;
                        goto err_alloc;
                }
        }
@@ -10231,6 +10325,9 @@ static void __init perf_event_init_all_cpus(void)
                swhash = &per_cpu(swevent_htable, cpu);
                mutex_init(&swhash->hlist_mutex);
                INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
+
+               INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
+               raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
        }
 }
 
index 9e6e1356e6bbc2dbb2aeb9ef5b2b02d05961ad3c..84ae830234f8fea6328690ebf977a7063aa91097 100644 (file)
@@ -210,6 +210,82 @@ repeat:
                goto repeat;
 }
 
+/*
+ * Note that if this function returns a valid task_struct pointer (!NULL)
+ * task->usage must remain >0 for the duration of the RCU critical section.
+ */
+struct task_struct *task_rcu_dereference(struct task_struct **ptask)
+{
+       struct sighand_struct *sighand;
+       struct task_struct *task;
+
+       /*
+        * We need to verify that release_task() was not called and thus
+        * delayed_put_task_struct() can't run and drop the last reference
+        * before rcu_read_unlock(). We check task->sighand != NULL,
+        * but we can read the already freed and reused memory.
+        */
+retry:
+       task = rcu_dereference(*ptask);
+       if (!task)
+               return NULL;
+
+       probe_kernel_address(&task->sighand, sighand);
+
+       /*
+        * Pairs with atomic_dec_and_test() in put_task_struct(). If this task
+        * was already freed we can not miss the preceding update of this
+        * pointer.
+        */
+       smp_rmb();
+       if (unlikely(task != READ_ONCE(*ptask)))
+               goto retry;
+
+       /*
+        * We've re-checked that "task == *ptask", now we have two different
+        * cases:
+        *
+        * 1. This is actually the same task/task_struct. In this case
+        *    sighand != NULL tells us it is still alive.
+        *
+        * 2. This is another task which got the same memory for task_struct.
+        *    We can't know this of course, and we can not trust
+        *    sighand != NULL.
+        *
+        *    In this case we actually return a random value, but this is
+        *    correct.
+        *
+        *    If we return NULL - we can pretend that we actually noticed that
+        *    *ptask was updated when the previous task has exited. Or pretend
+        *    that probe_slab_address(&sighand) reads NULL.
+        *
+        *    If we return the new task (because sighand is not NULL for any
+        *    reason) - this is fine too. This (new) task can't go away before
+        *    another gp pass.
+        *
+        *    And note: We could even eliminate the false positive if re-read
+        *    task->sighand once again to avoid the falsely NULL. But this case
+        *    is very unlikely so we don't care.
+        */
+       if (!sighand)
+               return NULL;
+
+       return task;
+}
+
+struct task_struct *try_get_task_struct(struct task_struct **ptask)
+{
+       struct task_struct *task;
+
+       rcu_read_lock();
+       task = task_rcu_dereference(ptask);
+       if (task)
+               get_task_struct(task);
+       rcu_read_unlock();
+
+       return task;
+}
+
 /*
  * Determine if a process group is "orphaned", according to the POSIX
  * definition in 2.2.2.52.  Orphaned process groups are not to be affected
@@ -700,10 +776,14 @@ void do_exit(long code)
 
        exit_signals(tsk);  /* sets PF_EXITING */
        /*
-        * tsk->flags are checked in the futex code to protect against
-        * an exiting task cleaning up the robust pi futexes.
+        * Ensure that all new tsk->pi_lock acquisitions must observe
+        * PF_EXITING. Serializes against futex.c:attach_to_pi_owner().
         */
        smp_mb();
+       /*
+        * Ensure that we must observe the pi_state in exit_mm() ->
+        * mm_release() -> exit_pi_state_list().
+        */
        raw_spin_unlock_wait(&tsk->pi_lock);
 
        if (unlikely(in_atomic())) {
index e25e92fb44face315265d43d981b71e193693f2a..6a5c239c7669c5ad8e01565be5a6fd38ae7d4452 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
 #define GCOV_COUNTERS                  10
 #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
 #define GCOV_COUNTERS                  9
index 4b353e0be1215a081e2cf367d3a3074700e4a685..0dbea887d6258522e7c2c10692f24ced83b51b8c 100644 (file)
@@ -452,7 +452,7 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
        return notifier_from_errno(ret);
 }
 
-struct notifier_block jump_label_module_nb = {
+static struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
 };
index 81f1a7107c0eb7b947c62658d886f556232d513a..589d763a49b3952322f08c00286675f314a3f76c 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/gfp.h>
 #include <linux/kmemcheck.h>
 #include <linux/random.h>
+#include <linux/jhash.h>
 
 #include <asm/sections.h>
 
@@ -309,10 +310,14 @@ static struct hlist_head chainhash_table[CHAINHASH_SIZE];
  * It's a 64-bit hash, because it's important for the keys to be
  * unique.
  */
-#define iterate_chain_key(key1, key2) \
-       (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
-       ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
-       (key2))
+static inline u64 iterate_chain_key(u64 key, u32 idx)
+{
+       u32 k0 = key, k1 = key >> 32;
+
+       __jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
+
+       return k0 | (u64)k1 << 32;
+}
 
 void lockdep_off(void)
 {
index d06ae3bb46c5f9f928cf7123464f49e284ab3ecf..57a871ae3c81a98e324b9ba907ef65f5c4f1cb8c 100644 (file)
@@ -29,12 +29,12 @@ extern void debug_mutex_init(struct mutex *lock, const char *name,
 
 static inline void mutex_set_owner(struct mutex *lock)
 {
-       lock->owner = current;
+       WRITE_ONCE(lock->owner, current);
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)
 {
-       lock->owner = NULL;
+       WRITE_ONCE(lock->owner, NULL);
 }
 
 #define spin_lock_mutex(lock, flags)                   \
index a68bae5e852a08f1c0d75587b57c5e31835f937e..6cd6b8e9efd7f73175be41f46d062650da821af8 100644 (file)
                __list_del((waiter)->list.prev, (waiter)->list.next)
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
+/*
+ * The mutex owner can get read and written to locklessly.
+ * We should use WRITE_ONCE when writing the owner value to
+ * avoid store tearing, otherwise, a thread could potentially
+ * read a partially written and incomplete owner value.
+ */
 static inline void mutex_set_owner(struct mutex *lock)
 {
-       lock->owner = current;
+       WRITE_ONCE(lock->owner, current);
 }
 
 static inline void mutex_clear_owner(struct mutex *lock)
 {
-       lock->owner = NULL;
+       WRITE_ONCE(lock->owner, NULL);
 }
 #else
 static inline void mutex_set_owner(struct mutex *lock)
index fec08233866875ca1962d836b4e5d6f88d475092..19248ddf37cea70b7a0e5cf0f2481fa5061bf8d5 100644 (file)
@@ -93,7 +93,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
         * that accesses can't leak upwards out of our subsequent critical
         * section in the case that the lock is currently held for write.
         */
-       cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts) - _QR_BIAS;
+       cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
        rspin_until_writer_unlock(lock, cnts);
 
        /*
index 5fc8c311b8fe59d46decc2c5a049ce6e860a07b8..b2caec7315af5b622a00f8babfd55bb3f27fb315 100644 (file)
@@ -90,7 +90,7 @@ static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
  * therefore increment the cpu number by one.
  */
 
-static inline u32 encode_tail(int cpu, int idx)
+static inline __pure u32 encode_tail(int cpu, int idx)
 {
        u32 tail;
 
@@ -103,7 +103,7 @@ static inline u32 encode_tail(int cpu, int idx)
        return tail;
 }
 
-static inline struct mcs_spinlock *decode_tail(u32 tail)
+static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
 {
        int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
        int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
@@ -267,6 +267,63 @@ static __always_inline u32  __pv_wait_head_or_lock(struct qspinlock *lock,
 #define queued_spin_lock_slowpath      native_queued_spin_lock_slowpath
 #endif
 
+/*
+ * Various notes on spin_is_locked() and spin_unlock_wait(), which are
+ * 'interesting' functions:
+ *
+ * PROBLEM: some architectures have an interesting issue with atomic ACQUIRE
+ * operations in that the ACQUIRE applies to the LOAD _not_ the STORE (ARM64,
+ * PPC). Also qspinlock has a similar issue per construction, the setting of
+ * the locked byte can be unordered acquiring the lock proper.
+ *
+ * This gets to be 'interesting' in the following cases, where the /should/s
+ * end up false because of this issue.
+ *
+ *
+ * CASE 1:
+ *
+ * So the spin_is_locked() correctness issue comes from something like:
+ *
+ *   CPU0                              CPU1
+ *
+ *   global_lock();                    local_lock(i)
+ *     spin_lock(&G)                     spin_lock(&L[i])
+ *     for (i)                           if (!spin_is_locked(&G)) {
+ *       spin_unlock_wait(&L[i]);          smp_acquire__after_ctrl_dep();
+ *                                         return;
+ *                                       }
+ *                                       // deal with fail
+ *
+ * Where it is important CPU1 sees G locked or CPU0 sees L[i] locked such
+ * that there is exclusion between the two critical sections.
+ *
+ * The load from spin_is_locked(&G) /should/ be constrained by the ACQUIRE from
+ * spin_lock(&L[i]), and similarly the load(s) from spin_unlock_wait(&L[i])
+ * /should/ be constrained by the ACQUIRE from spin_lock(&G).
+ *
+ * Similarly, later stuff is constrained by the ACQUIRE from CTRL+RMB.
+ *
+ *
+ * CASE 2:
+ *
+ * For spin_unlock_wait() there is a second correctness issue, namely:
+ *
+ *   CPU0                              CPU1
+ *
+ *   flag = set;
+ *   smp_mb();                         spin_lock(&l)
+ *   spin_unlock_wait(&l);             if (!flag)
+ *                                       // add to lockless list
+ *                                     spin_unlock(&l);
+ *   // iterate lockless list
+ *
+ * Which wants to ensure that CPU1 will stop adding bits to the list and CPU0
+ * will observe the last entry on the list (if spin_unlock_wait() had ACQUIRE
+ * semantics etc..)
+ *
+ * Where flag /should/ be ordered against the locked store of l.
+ */
+
 /*
  * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
  * issuing an _unordered_ store to set _Q_LOCKED_VAL.
@@ -322,7 +379,7 @@ void queued_spin_unlock_wait(struct qspinlock *lock)
                cpu_relax();
 
 done:
-       smp_rmb(); /* CTRL + RMB -> ACQUIRE */
+       smp_acquire__after_ctrl_dep();
 }
 EXPORT_SYMBOL(queued_spin_unlock_wait);
 #endif
@@ -418,7 +475,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
         * sequentiality; this is because not all clear_pending_set_locked()
         * implementations imply full barriers.
         */
-       smp_cond_acquire(!(atomic_read(&lock->val) & _Q_LOCKED_MASK));
+       smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));
 
        /*
         * take ownership and clear the pending bit.
@@ -455,6 +512,8 @@ queue:
         * pending stuff.
         *
         * p,*,* -> n,*,*
+        *
+        * RELEASE, such that the stores to @node must be complete.
         */
        old = xchg_tail(lock, tail);
        next = NULL;
@@ -465,6 +524,15 @@ queue:
         */
        if (old & _Q_TAIL_MASK) {
                prev = decode_tail(old);
+               /*
+                * The above xchg_tail() is also a load of @lock which generates,
+                * through decode_tail(), a pointer.
+                *
+                * The address dependency matches the RELEASE of xchg_tail()
+                * such that the access to @prev must happen after.
+                */
+               smp_read_barrier_depends();
+
                WRITE_ONCE(prev->next, node);
 
                pv_wait_node(node, prev);
@@ -494,7 +562,7 @@ queue:
         *
         * The PV pv_wait_head_or_lock function, if active, will acquire
         * the lock and return a non-zero value. So we have to skip the
-        * smp_cond_acquire() call. As the next PV queue head hasn't been
+        * smp_cond_load_acquire() call. As the next PV queue head hasn't been
         * designated yet, there is no way for the locked value to become
         * _Q_SLOW_VAL. So both the set_locked() and the
         * atomic_cmpxchg_relaxed() calls will be safe.
@@ -505,7 +573,7 @@ queue:
        if ((val = pv_wait_head_or_lock(lock, node)))
                goto locked;
 
-       smp_cond_acquire(!((val = atomic_read(&lock->val)) & _Q_LOCKED_PENDING_MASK));
+       val = smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_PENDING_MASK));
 
 locked:
        /*
@@ -525,9 +593,9 @@ locked:
                        break;
                }
                /*
-                * The smp_cond_acquire() call above has provided the necessary
-                * acquire semantics required for locking. At most two
-                * iterations of this loop may be ran.
+                * The smp_cond_load_acquire() call above has provided the
+                * necessary acquire semantics required for locking. At most
+                * two iterations of this loop may be run.
                 */
                old = atomic_cmpxchg_relaxed(&lock->val, val, _Q_LOCKED_VAL);
                if (old == val)
@@ -551,7 +619,7 @@ release:
        /*
         * release the node
         */
-       this_cpu_dec(mcs_nodes[0].count);
+       __this_cpu_dec(mcs_nodes[0].count);
 }
 EXPORT_SYMBOL(queued_spin_lock_slowpath);
 
index 21ede57f68b320594f874e4ff0a1b60974fc67f0..37649e69056cf974e27d0137260f8ff46ad688df 100644 (file)
@@ -112,12 +112,12 @@ static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 #else /* _Q_PENDING_BITS == 8 */
 static __always_inline void set_pending(struct qspinlock *lock)
 {
-       atomic_set_mask(_Q_PENDING_VAL, &lock->val);
+       atomic_or(_Q_PENDING_VAL, &lock->val);
 }
 
 static __always_inline void clear_pending(struct qspinlock *lock)
 {
-       atomic_clear_mask(_Q_PENDING_VAL, &lock->val);
+       atomic_andnot(_Q_PENDING_VAL, &lock->val);
 }
 
 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
index 3e746607abe5230bb9c650957582669c085d9a90..1ec0f48962b33dce424d4d3f491c6a1fd76ba13d 100644 (file)
@@ -1478,7 +1478,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
-       if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
+       if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
                return 0;
 
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
index 09e30c6225e5373c523a034f42924b9973fcb7a4..447e08de1fabf11b3d2050c5735a0ea331362fc4 100644 (file)
@@ -80,7 +80,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
-       sem->count = RWSEM_UNLOCKED_VALUE;
+       atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
@@ -114,12 +114,16 @@ enum rwsem_wake_type {
  *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
  *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
  * - there must be someone on the queue
- * - the spinlock must be held by the caller
+ * - the wait_lock must be held by the caller
+ * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
+ *   to actually wakeup the blocked task(s) and drop the reference count,
+ *   preferably when the wait_lock is released
  * - woken process blocks are discarded from the list after having task zeroed
- * - writers are only woken if downgrading is false
+ * - writers are only marked woken if downgrading is false
  */
 static struct rw_semaphore *
-__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
+__rwsem_mark_wake(struct rw_semaphore *sem,
+                 enum rwsem_wake_type wake_type, struct wake_q_head *wake_q)
 {
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
@@ -128,13 +132,16 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
 
        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
-               if (wake_type == RWSEM_WAKE_ANY)
-                       /* Wake writer at the front of the queue, but do not
-                        * grant it the lock yet as we want other writers
-                        * to be able to steal it.  Readers, on the other hand,
-                        * will block as they will notice the queued writer.
+               if (wake_type == RWSEM_WAKE_ANY) {
+                       /*
+                        * Mark writer at the front of the queue for wakeup.
+                        * Until the task is actually awoken later by
+                        * the caller, other writers are able to steal it.
+                        * Readers, on the other hand, will block as they
+                        * will notice the queued writer.
                         */
-                       wake_up_process(waiter->task);
+                       wake_q_add(wake_q, waiter->task);
+               }
                goto out;
        }
 
@@ -146,15 +153,27 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
  try_reader_grant:
-               oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
+               oldcount = atomic_long_fetch_add(adjustment, &sem->count);
+
                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
-                       /* A writer stole the lock. Undo our reader grant. */
-                       if (rwsem_atomic_update(-adjustment, sem) &
-                                               RWSEM_ACTIVE_MASK)
+                       /*
+                        * If the count is still less than RWSEM_WAITING_BIAS
+                        * after removing the adjustment, it is assumed that
+                        * a writer has stolen the lock. We have to undo our
+                        * reader grant.
+                        */
+                       if (atomic_long_add_return(-adjustment, &sem->count) <
+                           RWSEM_WAITING_BIAS)
                                goto out;
                        /* Last active locker left. Retry waking readers. */
                        goto try_reader_grant;
                }
+               /*
+                * It is not really necessary to set it to reader-owned here,
+                * but it gives the spinners an early indication that the
+                * readers now have the lock.
+                */
+               rwsem_set_reader_owned(sem);
        }
 
        /* Grant an infinite number of read locks to the readers at the front
@@ -179,7 +198,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
                adjustment -= RWSEM_WAITING_BIAS;
 
        if (adjustment)
-               rwsem_atomic_add(adjustment, sem);
+               atomic_long_add(adjustment, &sem->count);
 
        next = sem->wait_list.next;
        loop = woken;
@@ -187,17 +206,15 @@ __rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
                waiter = list_entry(next, struct rwsem_waiter, list);
                next = waiter->list.next;
                tsk = waiter->task;
+
+               wake_q_add(wake_q, tsk);
                /*
-                * Make sure we do not wakeup the next reader before
-                * setting the nil condition to grant the next reader;
-                * otherwise we could miss the wakeup on the other
-                * side and end up sleeping again. See the pairing
-                * in rwsem_down_read_failed().
+                * Ensure that the last operation is setting the reader
+                * waiter to nil such that rwsem_down_read_failed() cannot
+                * race with do_exit() by always holding a reference count
+                * to the task to wakeup.
                 */
-               smp_mb();
-               waiter->task = NULL;
-               wake_up_process(tsk);
-               put_task_struct(tsk);
+               smp_store_release(&waiter->task, NULL);
        } while (--loop);
 
        sem->wait_list.next = next;
@@ -216,11 +233,11 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        struct task_struct *tsk = current;
+       WAKE_Q(wake_q);
 
        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.type = RWSEM_WAITING_FOR_READ;
-       get_task_struct(tsk);
 
        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
@@ -228,7 +245,7 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
        list_add_tail(&waiter.list, &sem->wait_list);
 
        /* we're now waiting on the lock, but no longer actively locking */
-       count = rwsem_atomic_update(adjustment, sem);
+       count = atomic_long_add_return(adjustment, &sem->count);
 
        /* If there are no active locks, wake the front queued process(es).
         *
@@ -238,9 +255,10 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
        if (count == RWSEM_WAITING_BIAS ||
            (count > RWSEM_WAITING_BIAS &&
             adjustment != -RWSEM_ACTIVE_READ_BIAS))
-               sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
+               sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
        raw_spin_unlock_irq(&sem->wait_lock);
+       wake_up_q(&wake_q);
 
        /* wait to be given the lock */
        while (true) {
@@ -255,17 +273,29 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 }
 EXPORT_SYMBOL(rwsem_down_read_failed);
 
+/*
+ * This function must be called with the sem->wait_lock held to prevent
+ * race conditions between checking the rwsem wait list and setting the
+ * sem->count accordingly.
+ */
 static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
 {
        /*
-        * Try acquiring the write lock. Check count first in order
-        * to reduce unnecessary expensive cmpxchg() operations.
+        * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
         */
-       if (count == RWSEM_WAITING_BIAS &&
-           cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
-                   RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
-               if (!list_is_singular(&sem->wait_list))
-                       rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+       if (count != RWSEM_WAITING_BIAS)
+               return false;
+
+       /*
+        * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
+        * are other tasks on the wait list, we need to add on WAITING_BIAS.
+        */
+       count = list_is_singular(&sem->wait_list) ?
+                       RWSEM_ACTIVE_WRITE_BIAS :
+                       RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
+
+       if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
+                                                       == RWSEM_WAITING_BIAS) {
                rwsem_set_owner(sem);
                return true;
        }
@@ -279,13 +309,13 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
  */
 static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 {
-       long old, count = READ_ONCE(sem->count);
+       long old, count = atomic_long_read(&sem->count);
 
        while (true) {
                if (!(count == 0 || count == RWSEM_WAITING_BIAS))
                        return false;
 
-               old = cmpxchg_acquire(&sem->count, count,
+               old = atomic_long_cmpxchg_acquire(&sem->count, count,
                                      count + RWSEM_ACTIVE_WRITE_BIAS);
                if (old == count) {
                        rwsem_set_owner(sem);
@@ -306,16 +336,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 
        rcu_read_lock();
        owner = READ_ONCE(sem->owner);
-       if (!owner) {
-               long count = READ_ONCE(sem->count);
+       if (!rwsem_owner_is_writer(owner)) {
                /*
-                * If sem->owner is not set, yet we have just recently entered the
-                * slowpath with the lock being active, then there is a possibility
-                * reader(s) may have the lock. To be safe, bail spinning in these
-                * situations.
+                * Don't spin if the rwsem is readers owned.
                 */
-               if (count & RWSEM_ACTIVE_MASK)
-                       ret = false;
+               ret = !rwsem_owner_is_reader(owner);
                goto done;
        }
 
@@ -325,10 +350,15 @@ done:
        return ret;
 }
 
-static noinline
-bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
+/*
+ * Return true only if we can still spin on the owner field of the rwsem.
+ */
+static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
 {
-       long count;
+       struct task_struct *owner = READ_ONCE(sem->owner);
+
+       if (!rwsem_owner_is_writer(owner))
+               goto out;
 
        rcu_read_lock();
        while (sem->owner == owner) {
@@ -349,22 +379,16 @@ bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
                cpu_relax_lowlatency();
        }
        rcu_read_unlock();
-
-       if (READ_ONCE(sem->owner))
-               return true; /* new owner, continue spinning */
-
+out:
        /*
-        * When the owner is not set, the lock could be free or
-        * held by readers. Check the counter to verify the
-        * state.
+        * If there is a new owner or the owner is not set, we continue
+        * spinning.
         */
-       count = READ_ONCE(sem->count);
-       return (count == 0 || count == RWSEM_WAITING_BIAS);
+       return !rwsem_owner_is_reader(READ_ONCE(sem->owner));
 }
 
 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 {
-       struct task_struct *owner;
        bool taken = false;
 
        preempt_disable();
@@ -376,12 +400,17 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
        if (!osq_lock(&sem->osq))
                goto done;
 
-       while (true) {
-               owner = READ_ONCE(sem->owner);
-               if (owner && !rwsem_spin_on_owner(sem, owner))
-                       break;
-
-               /* wait_lock will be acquired if write_lock is obtained */
+       /*
+        * Optimistically spin on the owner field and attempt to acquire the
+        * lock whenever the owner changes. Spinning will be stopped when:
+        *  1) the owning writer isn't running; or
+        *  2) readers own the lock as we can't determine if they are
+        *     actively running or not.
+        */
+       while (rwsem_spin_on_owner(sem)) {
+               /*
+                * Try to acquire the lock
+                */
                if (rwsem_try_write_lock_unqueued(sem)) {
                        taken = true;
                        break;
@@ -393,7 +422,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
                 * we're an RT task that will live-lock because we won't let
                 * the owner complete.
                 */
-               if (!owner && (need_resched() || rt_task(current)))
+               if (!sem->owner && (need_resched() || rt_task(current)))
                        break;
 
                /*
@@ -440,9 +469,10 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
        bool waiting = true; /* any queued threads before us */
        struct rwsem_waiter waiter;
        struct rw_semaphore *ret = sem;
+       WAKE_Q(wake_q);
 
        /* undo write bias from down_write operation, stop active locking */
-       count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);
+       count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);
 
        /* do optimistic spinning and steal lock if possible */
        if (rwsem_optimistic_spin(sem))
@@ -465,18 +495,29 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 
        /* we're now waiting on the lock, but no longer actively locking */
        if (waiting) {
-               count = READ_ONCE(sem->count);
+               count = atomic_long_read(&sem->count);
 
                /*
                 * If there were already threads queued before us and there are
                 * no active writers, the lock must be read owned; so we try to
                 * wake any read locks that were queued ahead of us.
                 */
-               if (count > RWSEM_WAITING_BIAS)
-                       sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);
+               if (count > RWSEM_WAITING_BIAS) {
+                       WAKE_Q(wake_q);
+
+                       sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
+                       /*
+                        * The wakeup is normally called _after_ the wait_lock
+                        * is released, but given that we are proactively waking
+                        * readers we can deal with the wake_q overhead as it is
+                        * similar to releasing and taking the wait_lock again
+                        * for attempting rwsem_try_write_lock().
+                        */
+                       wake_up_q(&wake_q);
+               }
 
        } else
-               count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+               count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);
 
        /* wait until we successfully acquire the lock */
        set_current_state(state);
@@ -492,7 +533,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
 
                        schedule();
                        set_current_state(state);
-               } while ((count = sem->count) & RWSEM_ACTIVE_MASK);
+               } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);
 
                raw_spin_lock_irq(&sem->wait_lock);
        }
@@ -507,10 +548,11 @@ out_nolock:
        raw_spin_lock_irq(&sem->wait_lock);
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
-               rwsem_atomic_update(-RWSEM_WAITING_BIAS, sem);
+               atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
        else
-               __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
+               __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
+       wake_up_q(&wake_q);
 
        return ERR_PTR(-EINTR);
 }
@@ -537,6 +579,7 @@ __visible
 struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
+       WAKE_Q(wake_q);
 
        /*
         * If a spinner is present, it is not necessary to do the wakeup.
@@ -573,9 +616,10 @@ locked:
 
        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
-               sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);
+               sem = __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+       wake_up_q(&wake_q);
 
        return sem;
 }
@@ -590,14 +634,16 @@ __visible
 struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
+       WAKE_Q(wake_q);
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
-               sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);
+               sem = __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+       wake_up_q(&wake_q);
 
        return sem;
 }
index 2e853ad93a3a0f27ac3ce676ebe637ad8e03199e..45ba475d4be344b76b1b35e6a174d8f8085788ce 100644 (file)
@@ -22,6 +22,7 @@ void __sched down_read(struct rw_semaphore *sem)
        rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 
        LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
+       rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read);
@@ -33,8 +34,10 @@ int down_read_trylock(struct rw_semaphore *sem)
 {
        int ret = __down_read_trylock(sem);
 
-       if (ret == 1)
+       if (ret == 1) {
                rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
+               rwsem_set_reader_owned(sem);
+       }
        return ret;
 }
 
@@ -124,7 +127,7 @@ void downgrade_write(struct rw_semaphore *sem)
         * lockdep: a downgraded write will live on as a write
         * dependency.
         */
-       rwsem_clear_owner(sem);
+       rwsem_set_reader_owned(sem);
        __downgrade_write(sem);
 }
 
@@ -138,6 +141,7 @@ void down_read_nested(struct rw_semaphore *sem, int subclass)
        rwsem_acquire_read(&sem->dep_map, subclass, 0, _RET_IP_);
 
        LOCK_CONTENDED(sem, __down_read_trylock, __down_read);
+       rwsem_set_reader_owned(sem);
 }
 
 EXPORT_SYMBOL(down_read_nested);
index 870ed9a5b4260c3c470924ad61e86ed91cefacaa..a699f4048ba11c76017a32ed0b5bc86b0ce45843 100644 (file)
@@ -1,14 +1,58 @@
+/*
+ * The owner field of the rw_semaphore structure will be set to
+ * RWSEM_READ_OWNED when a reader grabs the lock. A writer will clear
+ * the owner field when it unlocks. A reader, on the other hand, will
+ * not touch the owner field when it unlocks.
+ *
+ * In essence, the owner field now has the following 3 states:
+ *  1) 0
+ *     - lock is free or the owner hasn't set the field yet
+ *  2) RWSEM_READER_OWNED
+ *     - lock is currently or previously owned by readers (lock is free
+ *       or not set by owner yet)
+ *  3) Other non-zero value
+ *     - a writer owns the lock
+ */
+#define RWSEM_READER_OWNED     ((struct task_struct *)1UL)
+
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+/*
+ * All writes to owner are protected by WRITE_ONCE() to make sure that
+ * store tearing can't happen as optimistic spinners may read and use
+ * the owner value concurrently without lock. Read from owner, however,
+ * may not need READ_ONCE() as long as the pointer value is only used
+ * for comparison and isn't being dereferenced.
+ */
 static inline void rwsem_set_owner(struct rw_semaphore *sem)
 {
-       sem->owner = current;
+       WRITE_ONCE(sem->owner, current);
 }
 
 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
 {
-       sem->owner = NULL;
+       WRITE_ONCE(sem->owner, NULL);
+}
+
+static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
+{
+       /*
+        * We check the owner value first to make sure that we will only
+        * do a write to the rwsem cacheline when it is really necessary
+        * to minimize cacheline contention.
+        */
+       if (sem->owner != RWSEM_READER_OWNED)
+               WRITE_ONCE(sem->owner, RWSEM_READER_OWNED);
+}
+
+static inline bool rwsem_owner_is_writer(struct task_struct *owner)
+{
+       return owner && owner != RWSEM_READER_OWNED;
 }
 
+static inline bool rwsem_owner_is_reader(struct task_struct *owner)
+{
+       return owner == RWSEM_READER_OWNED;
+}
 #else
 static inline void rwsem_set_owner(struct rw_semaphore *sem)
 {
@@ -17,4 +61,8 @@ static inline void rwsem_set_owner(struct rw_semaphore *sem)
 static inline void rwsem_clear_owner(struct rw_semaphore *sem)
 {
 }
+
+static inline void rwsem_set_reader_owned(struct rw_semaphore *sem)
+{
+}
 #endif
index fca9254280ee4fad96bc8f2a61772160975c4cd6..9021387c6ff4bcc977eab062323f76a7e5f348e1 100644 (file)
@@ -1154,11 +1154,6 @@ static int __init nohibernate_setup(char *str)
        return 1;
 }
 
-static int __init kaslr_nohibernate_setup(char *str)
-{
-       return nohibernate_setup(str);
-}
-
 static int __init page_poison_nohibernate_setup(char *str)
 {
 #ifdef CONFIG_PAGE_POISONING_ZERO
@@ -1182,5 +1177,4 @@ __setup("hibernate=", hibernate_setup);
 __setup("resumewait", resumewait_setup);
 __setup("resumedelay=", resumedelay_setup);
 __setup("nohibernate", nohibernate_setup);
-__setup("kaslr", kaslr_nohibernate_setup);
 __setup("page_poison=", page_poison_nohibernate_setup);
index 3cee0d8393ed8adba3bf4136fe29f420890e94ec..d38ab08a3fe7eb5d92caf5b0c4d590299d912d90 100644 (file)
@@ -58,7 +58,7 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
 #define VERBOSE_PERFOUT_ERRSTRING(s) \
        do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
 
-torture_param(bool, gp_exp, true, "Use expedited GP wait primitives");
+torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
 torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
 torture_param(int, nreaders, -1, "Number of RCU reader threads");
 torture_param(int, nwriters, -1, "Number of RCU updater threads");
@@ -96,12 +96,7 @@ static int rcu_perf_writer_state;
 #define MAX_MEAS 10000
 #define MIN_MEAS 100
 
-#if defined(MODULE) || defined(CONFIG_RCU_PERF_TEST_RUNNABLE)
-#define RCUPERF_RUNNABLE_INIT 1
-#else
-#define RCUPERF_RUNNABLE_INIT 0
-#endif
-static int perf_runnable = RCUPERF_RUNNABLE_INIT;
+static int perf_runnable = IS_ENABLED(MODULE);
 module_param(perf_runnable, int, 0444);
 MODULE_PARM_DESC(perf_runnable, "Start rcuperf at boot");
 
@@ -363,8 +358,6 @@ rcu_perf_writer(void *arg)
        u64 *wdpp = writer_durations[me];
 
        VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
-       WARN_ON(rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp);
-       WARN_ON(rcu_gp_is_normal() && gp_exp);
        WARN_ON(!wdpp);
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        sp.sched_priority = 1;
@@ -631,12 +624,24 @@ rcu_perf_init(void)
                firsterr = -ENOMEM;
                goto unwind;
        }
+       if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp) {
+               VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
+               firsterr = -EINVAL;
+               goto unwind;
+       }
+       if (rcu_gp_is_normal() && gp_exp) {
+               VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
+               firsterr = -EINVAL;
+               goto unwind;
+       }
        for (i = 0; i < nrealwriters; i++) {
                writer_durations[i] =
                        kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
                                GFP_KERNEL);
-               if (!writer_durations[i])
+               if (!writer_durations[i]) {
+                       firsterr = -ENOMEM;
                        goto unwind;
+               }
                firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
                                                  writer_tasks[i]);
                if (firsterr)
index 084a28a732eb7ec53f3a0c055d6be031bb462941..971e2b138063a6920a96adc1f21e65619746a04b 100644 (file)
@@ -182,12 +182,7 @@ static const char *rcu_torture_writer_state_getname(void)
        return rcu_torture_writer_state_names[i];
 }
 
-#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
-#define RCUTORTURE_RUNNABLE_INIT 1
-#else
-#define RCUTORTURE_RUNNABLE_INIT 0
-#endif
-static int torture_runnable = RCUTORTURE_RUNNABLE_INIT;
+static int torture_runnable = IS_ENABLED(MODULE);
 module_param(torture_runnable, int, 0444);
 MODULE_PARM_DESC(torture_runnable, "Start rcutorture at boot");
 
@@ -1476,7 +1471,7 @@ static int rcu_torture_barrier_cbs(void *arg)
                        break;
                /*
                 * The above smp_load_acquire() ensures barrier_phase load
-                * is ordered before the folloiwng ->call().
+                * is ordered before the following ->call().
                 */
                local_irq_disable(); /* Just to test no-irq call_rcu(). */
                cur_ops->call(&rcu, rcu_torture_barrier_cbf);
index c7f1bc4f817c4a34e19ebc160693a27f034dbac2..f433959e9322c20c9c93aa7d502d1deb5b0011c2 100644 (file)
@@ -125,12 +125,14 @@ int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 /* Number of rcu_nodes at specified level. */
 static int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
 int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
+/* panic() on RCU Stall sysctl. */
+int sysctl_panic_on_rcu_stall __read_mostly;
 
 /*
  * The rcu_scheduler_active variable transitions from zero to one just
  * before the first task is spawned.  So when this variable is zero, RCU
  * can assume that there is but one task, allowing RCU to (for example)
- * optimize synchronize_sched() to a simple barrier().  When this variable
+ * optimize synchronize_rcu() to a simple barrier().  When this variable
  * is one, RCU must actually do all the hard work required to detect real
  * grace periods.  This variable is also used to suppress boot-time false
  * positives from lockdep-RCU error checking.
@@ -159,6 +161,7 @@ static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 static void rcu_report_exp_rdp(struct rcu_state *rsp,
                               struct rcu_data *rdp, bool wake);
+static void sync_sched_exp_online_cleanup(int cpu);
 
 /* rcuc/rcub kthread realtime priority */
 #ifdef CONFIG_RCU_KTHREAD_PRIO
@@ -1284,9 +1287,9 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->qsmask != 0) {
-                       for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
-                               if (rnp->qsmask & (1UL << cpu))
-                                       dump_cpu_task(rnp->grplo + cpu);
+                       for_each_leaf_node_possible_cpu(rnp, cpu)
+                               if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
+                                       dump_cpu_task(cpu);
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
@@ -1311,6 +1314,12 @@ static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
        }
 }
 
+static inline void panic_on_rcu_stall(void)
+{
+       if (sysctl_panic_on_rcu_stall)
+               panic("RCU Stall\n");
+}
+
 static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 {
        int cpu;
@@ -1351,10 +1360,9 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                ndetected += rcu_print_task_stall(rnp);
                if (rnp->qsmask != 0) {
-                       for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
-                               if (rnp->qsmask & (1UL << cpu)) {
-                                       print_cpu_stall_info(rsp,
-                                                            rnp->grplo + cpu);
+                       for_each_leaf_node_possible_cpu(rnp, cpu)
+                               if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
+                                       print_cpu_stall_info(rsp, cpu);
                                        ndetected++;
                                }
                }
@@ -1390,6 +1398,8 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 
        rcu_check_gp_kthread_starvation(rsp);
 
+       panic_on_rcu_stall();
+
        force_quiescent_state(rsp);  /* Kick them all. */
 }
 
@@ -1430,6 +1440,8 @@ static void print_cpu_stall(struct rcu_state *rsp)
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
+       panic_on_rcu_stall();
+
        /*
         * Attempt to revive the RCU machinery by forcing a context switch.
         *
@@ -1989,8 +2001,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
         * of the tree within the rsp->node[] array.  Note that other CPUs
         * will access only the leaves of the hierarchy, thus seeing that no
         * grace period is in progress, at least until the corresponding
-        * leaf node has been initialized.  In addition, we have excluded
-        * CPU-hotplug operations.
+        * leaf node has been initialized.
         *
         * The grace period cannot complete until the initialization
         * process finishes, because this kthread handles both.
@@ -2872,7 +2883,6 @@ static void force_qs_rnp(struct rcu_state *rsp,
                                  unsigned long *maxj),
                         bool *isidle, unsigned long *maxj)
 {
-       unsigned long bit;
        int cpu;
        unsigned long flags;
        unsigned long mask;
@@ -2907,9 +2917,8 @@ static void force_qs_rnp(struct rcu_state *rsp,
                                continue;
                        }
                }
-               cpu = rnp->grplo;
-               bit = 1;
-               for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
+               for_each_leaf_node_possible_cpu(rnp, cpu) {
+                       unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
                        if ((rnp->qsmask & bit) != 0) {
                                if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
                                        mask |= bit;
@@ -3448,549 +3457,6 @@ static bool rcu_seq_done(unsigned long *sp, unsigned long s)
        return ULONG_CMP_GE(READ_ONCE(*sp), s);
 }
 
-/* Wrapper functions for expedited grace periods.  */
-static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
-{
-       rcu_seq_start(&rsp->expedited_sequence);
-}
-static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
-{
-       rcu_seq_end(&rsp->expedited_sequence);
-       smp_mb(); /* Ensure that consecutive grace periods serialize. */
-}
-static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
-{
-       unsigned long s;
-
-       smp_mb(); /* Caller's modifications seen first by other CPUs. */
-       s = rcu_seq_snap(&rsp->expedited_sequence);
-       trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
-       return s;
-}
-static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
-{
-       return rcu_seq_done(&rsp->expedited_sequence, s);
-}
-
-/*
- * Reset the ->expmaskinit values in the rcu_node tree to reflect any
- * recent CPU-online activity.  Note that these masks are not cleared
- * when CPUs go offline, so they reflect the union of all CPUs that have
- * ever been online.  This means that this function normally takes its
- * no-work-to-do fastpath.
- */
-static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
-{
-       bool done;
-       unsigned long flags;
-       unsigned long mask;
-       unsigned long oldmask;
-       int ncpus = READ_ONCE(rsp->ncpus);
-       struct rcu_node *rnp;
-       struct rcu_node *rnp_up;
-
-       /* If no new CPUs onlined since last time, nothing to do. */
-       if (likely(ncpus == rsp->ncpus_snap))
-               return;
-       rsp->ncpus_snap = ncpus;
-
-       /*
-        * Each pass through the following loop propagates newly onlined
-        * CPUs for the current rcu_node structure up the rcu_node tree.
-        */
-       rcu_for_each_leaf_node(rsp, rnp) {
-               raw_spin_lock_irqsave_rcu_node(rnp, flags);
-               if (rnp->expmaskinit == rnp->expmaskinitnext) {
-                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-                       continue;  /* No new CPUs, nothing to do. */
-               }
-
-               /* Update this node's mask, track old value for propagation. */
-               oldmask = rnp->expmaskinit;
-               rnp->expmaskinit = rnp->expmaskinitnext;
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
-               /* If was already nonzero, nothing to propagate. */
-               if (oldmask)
-                       continue;
-
-               /* Propagate the new CPU up the tree. */
-               mask = rnp->grpmask;
-               rnp_up = rnp->parent;
-               done = false;
-               while (rnp_up) {
-                       raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
-                       if (rnp_up->expmaskinit)
-                               done = true;
-                       rnp_up->expmaskinit |= mask;
-                       raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
-                       if (done)
-                               break;
-                       mask = rnp_up->grpmask;
-                       rnp_up = rnp_up->parent;
-               }
-       }
-}
-
-/*
- * Reset the ->expmask values in the rcu_node tree in preparation for
- * a new expedited grace period.
- */
-static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
-{
-       unsigned long flags;
-       struct rcu_node *rnp;
-
-       sync_exp_reset_tree_hotplug(rsp);
-       rcu_for_each_node_breadth_first(rsp, rnp) {
-               raw_spin_lock_irqsave_rcu_node(rnp, flags);
-               WARN_ON_ONCE(rnp->expmask);
-               rnp->expmask = rnp->expmaskinit;
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-       }
-}
-
-/*
- * Return non-zero if there is no RCU expedited grace period in progress
- * for the specified rcu_node structure, in other words, if all CPUs and
- * tasks covered by the specified rcu_node structure have done their bit
- * for the current expedited grace period.  Works only for preemptible
- * RCU -- other RCU implementation use other means.
- *
- * Caller must hold the rcu_state's exp_mutex.
- */
-static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
-{
-       return rnp->exp_tasks == NULL &&
-              READ_ONCE(rnp->expmask) == 0;
-}
-
-/*
- * Report the exit from RCU read-side critical section for the last task
- * that queued itself during or before the current expedited preemptible-RCU
- * grace period.  This event is reported either to the rcu_node structure on
- * which the task was queued or to one of that rcu_node structure's ancestors,
- * recursively up the tree.  (Calm down, calm down, we do the recursion
- * iteratively!)
- *
- * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
- * structure's ->lock.
- */
-static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
-                                bool wake, unsigned long flags)
-       __releases(rnp->lock)
-{
-       unsigned long mask;
-
-       for (;;) {
-               if (!sync_rcu_preempt_exp_done(rnp)) {
-                       if (!rnp->expmask)
-                               rcu_initiate_boost(rnp, flags);
-                       else
-                               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-                       break;
-               }
-               if (rnp->parent == NULL) {
-                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-                       if (wake) {
-                               smp_mb(); /* EGP done before wake_up(). */
-                               swake_up(&rsp->expedited_wq);
-                       }
-                       break;
-               }
-               mask = rnp->grpmask;
-               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
-               rnp = rnp->parent;
-               raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
-               WARN_ON_ONCE(!(rnp->expmask & mask));
-               rnp->expmask &= ~mask;
-       }
-}
-
-/*
- * Report expedited quiescent state for specified node.  This is a
- * lock-acquisition wrapper function for __rcu_report_exp_rnp().
- *
- * Caller must hold the rcu_state's exp_mutex.
- */
-static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
-                                             struct rcu_node *rnp, bool wake)
-{
-       unsigned long flags;
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       __rcu_report_exp_rnp(rsp, rnp, wake, flags);
-}
-
-/*
- * Report expedited quiescent state for multiple CPUs, all covered by the
- * specified leaf rcu_node structure.  Caller must hold the rcu_state's
- * exp_mutex.
- */
-static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
-                                   unsigned long mask, bool wake)
-{
-       unsigned long flags;
-
-       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       if (!(rnp->expmask & mask)) {
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               return;
-       }
-       rnp->expmask &= ~mask;
-       __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
-}
-
-/*
- * Report expedited quiescent state for specified rcu_data (CPU).
- */
-static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
-                              bool wake)
-{
-       rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
-}
-
-/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
-static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
-                              unsigned long s)
-{
-       if (rcu_exp_gp_seq_done(rsp, s)) {
-               trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
-               /* Ensure test happens before caller kfree(). */
-               smp_mb__before_atomic(); /* ^^^ */
-               atomic_long_inc(stat);
-               return true;
-       }
-       return false;
-}
-
-/*
- * Funnel-lock acquisition for expedited grace periods.  Returns true
- * if some other task completed an expedited grace period that this task
- * can piggy-back on, and with no mutex held.  Otherwise, returns false
- * with the mutex held, indicating that the caller must actually do the
- * expedited grace period.
- */
-static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
-{
-       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
-       struct rcu_node *rnp = rdp->mynode;
-       struct rcu_node *rnp_root = rcu_get_root(rsp);
-
-       /* Low-contention fastpath. */
-       if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
-           (rnp == rnp_root ||
-            ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
-           !mutex_is_locked(&rsp->exp_mutex) &&
-           mutex_trylock(&rsp->exp_mutex))
-               goto fastpath;
-
-       /*
-        * Each pass through the following loop works its way up
-        * the rcu_node tree, returning if others have done the work or
-        * otherwise falls through to acquire rsp->exp_mutex.  The mapping
-        * from CPU to rcu_node structure can be inexact, as it is just
-        * promoting locality and is not strictly needed for correctness.
-        */
-       for (; rnp != NULL; rnp = rnp->parent) {
-               if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
-                       return true;
-
-               /* Work not done, either wait here or go up. */
-               spin_lock(&rnp->exp_lock);
-               if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
-
-                       /* Someone else doing GP, so wait for them. */
-                       spin_unlock(&rnp->exp_lock);
-                       trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
-                                                 rnp->grplo, rnp->grphi,
-                                                 TPS("wait"));
-                       wait_event(rnp->exp_wq[(s >> 1) & 0x3],
-                                  sync_exp_work_done(rsp,
-                                                     &rdp->exp_workdone2, s));
-                       return true;
-               }
-               rnp->exp_seq_rq = s; /* Followers can wait on us. */
-               spin_unlock(&rnp->exp_lock);
-               trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
-                                         rnp->grphi, TPS("nxtlvl"));
-       }
-       mutex_lock(&rsp->exp_mutex);
-fastpath:
-       if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
-               mutex_unlock(&rsp->exp_mutex);
-               return true;
-       }
-       rcu_exp_gp_seq_start(rsp);
-       trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
-       return false;
-}
-
-/* Invoked on each online non-idle CPU for expedited quiescent state. */
-static void sync_sched_exp_handler(void *data)
-{
-       struct rcu_data *rdp;
-       struct rcu_node *rnp;
-       struct rcu_state *rsp = data;
-
-       rdp = this_cpu_ptr(rsp->rda);
-       rnp = rdp->mynode;
-       if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
-           __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
-               return;
-       if (rcu_is_cpu_rrupt_from_idle()) {
-               rcu_report_exp_rdp(&rcu_sched_state,
-                                  this_cpu_ptr(&rcu_sched_data), true);
-               return;
-       }
-       __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
-       resched_cpu(smp_processor_id());
-}
-
-/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
-static void sync_sched_exp_online_cleanup(int cpu)
-{
-       struct rcu_data *rdp;
-       int ret;
-       struct rcu_node *rnp;
-       struct rcu_state *rsp = &rcu_sched_state;
-
-       rdp = per_cpu_ptr(rsp->rda, cpu);
-       rnp = rdp->mynode;
-       if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
-               return;
-       ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
-       WARN_ON_ONCE(ret);
-}
-
-/*
- * Select the nodes that the upcoming expedited grace period needs
- * to wait for.
- */
-static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
-                                    smp_call_func_t func)
-{
-       int cpu;
-       unsigned long flags;
-       unsigned long mask;
-       unsigned long mask_ofl_test;
-       unsigned long mask_ofl_ipi;
-       int ret;
-       struct rcu_node *rnp;
-
-       sync_exp_reset_tree(rsp);
-       rcu_for_each_leaf_node(rsp, rnp) {
-               raw_spin_lock_irqsave_rcu_node(rnp, flags);
-
-               /* Each pass checks a CPU for identity, offline, and idle. */
-               mask_ofl_test = 0;
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++) {
-                       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
-                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
-
-                       if (raw_smp_processor_id() == cpu ||
-                           !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
-                               mask_ofl_test |= rdp->grpmask;
-               }
-               mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
-
-               /*
-                * Need to wait for any blocked tasks as well.  Note that
-                * additional blocking tasks will also block the expedited
-                * GP until such time as the ->expmask bits are cleared.
-                */
-               if (rcu_preempt_has_tasks(rnp))
-                       rnp->exp_tasks = rnp->blkd_tasks.next;
-               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
-               /* IPI the remaining CPUs for expedited quiescent state. */
-               mask = 1;
-               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
-                       if (!(mask_ofl_ipi & mask))
-                               continue;
-retry_ipi:
-                       ret = smp_call_function_single(cpu, func, rsp, 0);
-                       if (!ret) {
-                               mask_ofl_ipi &= ~mask;
-                               continue;
-                       }
-                       /* Failed, raced with offline. */
-                       raw_spin_lock_irqsave_rcu_node(rnp, flags);
-                       if (cpu_online(cpu) &&
-                           (rnp->expmask & mask)) {
-                               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-                               schedule_timeout_uninterruptible(1);
-                               if (cpu_online(cpu) &&
-                                   (rnp->expmask & mask))
-                                       goto retry_ipi;
-                               raw_spin_lock_irqsave_rcu_node(rnp, flags);
-                       }
-                       if (!(rnp->expmask & mask))
-                               mask_ofl_ipi &= ~mask;
-                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-               }
-               /* Report quiescent states for those that went offline. */
-               mask_ofl_test |= mask_ofl_ipi;
-               if (mask_ofl_test)
-                       rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
-       }
-}
-
-static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
-{
-       int cpu;
-       unsigned long jiffies_stall;
-       unsigned long jiffies_start;
-       unsigned long mask;
-       int ndetected;
-       struct rcu_node *rnp;
-       struct rcu_node *rnp_root = rcu_get_root(rsp);
-       int ret;
-
-       jiffies_stall = rcu_jiffies_till_stall_check();
-       jiffies_start = jiffies;
-
-       for (;;) {
-               ret = swait_event_timeout(
-                               rsp->expedited_wq,
-                               sync_rcu_preempt_exp_done(rnp_root),
-                               jiffies_stall);
-               if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
-                       return;
-               if (ret < 0) {
-                       /* Hit a signal, disable CPU stall warnings. */
-                       swait_event(rsp->expedited_wq,
-                                  sync_rcu_preempt_exp_done(rnp_root));
-                       return;
-               }
-               pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
-                      rsp->name);
-               ndetected = 0;
-               rcu_for_each_leaf_node(rsp, rnp) {
-                       ndetected += rcu_print_task_exp_stall(rnp);
-                       mask = 1;
-                       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
-                               struct rcu_data *rdp;
-
-                               if (!(rnp->expmask & mask))
-                                       continue;
-                               ndetected++;
-                               rdp = per_cpu_ptr(rsp->rda, cpu);
-                               pr_cont(" %d-%c%c%c", cpu,
-                                       "O."[!!cpu_online(cpu)],
-                                       "o."[!!(rdp->grpmask & rnp->expmaskinit)],
-                                       "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
-                       }
-                       mask <<= 1;
-               }
-               pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
-                       jiffies - jiffies_start, rsp->expedited_sequence,
-                       rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
-               if (ndetected) {
-                       pr_err("blocking rcu_node structures:");
-                       rcu_for_each_node_breadth_first(rsp, rnp) {
-                               if (rnp == rnp_root)
-                                       continue; /* printed unconditionally */
-                               if (sync_rcu_preempt_exp_done(rnp))
-                                       continue;
-                               pr_cont(" l=%u:%d-%d:%#lx/%c",
-                                       rnp->level, rnp->grplo, rnp->grphi,
-                                       rnp->expmask,
-                                       ".T"[!!rnp->exp_tasks]);
-                       }
-                       pr_cont("\n");
-               }
-               rcu_for_each_leaf_node(rsp, rnp) {
-                       mask = 1;
-                       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
-                               if (!(rnp->expmask & mask))
-                                       continue;
-                               dump_cpu_task(cpu);
-                       }
-               }
-               jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
-       }
-}
-
-/*
- * Wait for the current expedited grace period to complete, and then
- * wake up everyone who piggybacked on the just-completed expedited
- * grace period.  Also update all the ->exp_seq_rq counters as needed
- * in order to avoid counter-wrap problems.
- */
-static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
-{
-       struct rcu_node *rnp;
-
-       synchronize_sched_expedited_wait(rsp);
-       rcu_exp_gp_seq_end(rsp);
-       trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
-
-       /*
-        * Switch over to wakeup mode, allowing the next GP, but -only- the
-        * next GP, to proceed.
-        */
-       mutex_lock(&rsp->exp_wake_mutex);
-       mutex_unlock(&rsp->exp_mutex);
-
-       rcu_for_each_node_breadth_first(rsp, rnp) {
-               if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
-                       spin_lock(&rnp->exp_lock);
-                       /* Recheck, avoid hang in case someone just arrived. */
-                       if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
-                               rnp->exp_seq_rq = s;
-                       spin_unlock(&rnp->exp_lock);
-               }
-               wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
-       }
-       trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
-       mutex_unlock(&rsp->exp_wake_mutex);
-}
-
-/**
- * synchronize_sched_expedited - Brute-force RCU-sched grace period
- *
- * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly.  This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code.  In fact,
- * if you are using synchronize_sched_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_sched() instead.
- *
- * This implementation can be thought of as an application of sequence
- * locking to expedited grace periods, but using the sequence counter to
- * determine when someone else has already done the work instead of for
- * retrying readers.
- */
-void synchronize_sched_expedited(void)
-{
-       unsigned long s;
-       struct rcu_state *rsp = &rcu_sched_state;
-
-       /* If only one CPU, this is automatically a grace period. */
-       if (rcu_blocking_is_gp())
-               return;
-
-       /* If expedited grace periods are prohibited, fall back to normal. */
-       if (rcu_gp_is_normal()) {
-               wait_rcu_gp(call_rcu_sched);
-               return;
-       }
-
-       /* Take a snapshot of the sequence number.  */
-       s = rcu_exp_gp_seq_snap(rsp);
-       if (exp_funnel_lock(rsp, s))
-               return;  /* Someone else did our work for us. */
-
-       /* Initialize the rcu_node tree in preparation for the wait. */
-       sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
-
-       /* Wait and clean up, including waking everyone. */
-       rcu_exp_wait_wake(rsp, s);
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
 /*
  * Check to see if there is any immediate RCU-related work to be done
  * by the current CPU, for the specified type of RCU, returning 1 if so.
@@ -4281,7 +3747,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 
        /* Set up local state, ensuring consistent view of global state. */
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
-       rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
+       rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
        rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
        WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
        WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
@@ -4364,9 +3830,6 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
-       if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
-               return;
-
        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
        mask = rdp->grpmask;
        raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
@@ -4751,4 +4214,5 @@ void __init rcu_init(void)
                rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
 }
 
+#include "tree_exp.h"
 #include "tree_plugin.h"
index e3959f5e6ddf730e07b433acd3eefc2707387996..f714f873bf9daea68d6e469a9fa3d8ef38c1f125 100644 (file)
@@ -253,6 +253,13 @@ struct rcu_node {
        wait_queue_head_t exp_wq[4];
 } ____cacheline_internodealigned_in_smp;
 
+/*
+ * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
+ * are indexed relative to this interval rather than the global CPU ID space.
+ * This generates the bit for a CPU in node-local masks.
+ */
+#define leaf_node_cpu_bit(rnp, cpu) (1UL << ((cpu) - (rnp)->grplo))
+
 /*
  * Do a full breadth-first scan of the rcu_node structures for the
  * specified rcu_state structure.
@@ -280,6 +287,14 @@ struct rcu_node {
        for ((rnp) = (rsp)->level[rcu_num_lvls - 1]; \
             (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)
 
+/*
+ * Iterate over all possible CPUs in a leaf RCU node.
+ */
+#define for_each_leaf_node_possible_cpu(rnp, cpu) \
+       for ((cpu) = cpumask_next(rnp->grplo - 1, cpu_possible_mask); \
+            cpu <= rnp->grphi; \
+            cpu = cpumask_next((cpu), cpu_possible_mask))
+
 /*
  * Union to allow "aggregate OR" operation on the need for a quiescent
  * state by the normal and expedited grace periods.
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
new file mode 100644 (file)
index 0000000..6d86ab6
--- /dev/null
@@ -0,0 +1,655 @@
+/*
+ * RCU expedited grace periods
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2016
+ *
+ * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+/* Wrapper functions for expedited grace periods.  */
+static void rcu_exp_gp_seq_start(struct rcu_state *rsp)
+{
+       rcu_seq_start(&rsp->expedited_sequence);
+}
+static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
+{
+       rcu_seq_end(&rsp->expedited_sequence);
+       smp_mb(); /* Ensure that consecutive grace periods serialize. */
+}
+static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
+{
+       unsigned long s;
+
+       smp_mb(); /* Caller's modifications seen first by other CPUs. */
+       s = rcu_seq_snap(&rsp->expedited_sequence);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
+       return s;
+}
+static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
+{
+       return rcu_seq_done(&rsp->expedited_sequence, s);
+}
+
+/*
+ * Reset the ->expmaskinit values in the rcu_node tree to reflect any
+ * recent CPU-online activity.  Note that these masks are not cleared
+ * when CPUs go offline, so they reflect the union of all CPUs that have
+ * ever been online.  This means that this function normally takes its
+ * no-work-to-do fastpath.
+ */
+static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
+{
+       bool done;
+       unsigned long flags;
+       unsigned long mask;
+       unsigned long oldmask;
+       int ncpus = READ_ONCE(rsp->ncpus);
+       struct rcu_node *rnp;
+       struct rcu_node *rnp_up;
+
+       /* If no new CPUs onlined since last time, nothing to do. */
+       if (likely(ncpus == rsp->ncpus_snap))
+               return;
+       rsp->ncpus_snap = ncpus;
+
+       /*
+        * Each pass through the following loop propagates newly onlined
+        * CPUs for the current rcu_node structure up the rcu_node tree.
+        */
+       rcu_for_each_leaf_node(rsp, rnp) {
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+               if (rnp->expmaskinit == rnp->expmaskinitnext) {
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+                       continue;  /* No new CPUs, nothing to do. */
+               }
+
+               /* Update this node's mask, track old value for propagation. */
+               oldmask = rnp->expmaskinit;
+               rnp->expmaskinit = rnp->expmaskinitnext;
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+
+               /* If was already nonzero, nothing to propagate. */
+               if (oldmask)
+                       continue;
+
+               /* Propagate the new CPU up the tree. */
+               mask = rnp->grpmask;
+               rnp_up = rnp->parent;
+               done = false;
+               while (rnp_up) {
+                       raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
+                       if (rnp_up->expmaskinit)
+                               done = true;
+                       rnp_up->expmaskinit |= mask;
+                       raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
+                       if (done)
+                               break;
+                       mask = rnp_up->grpmask;
+                       rnp_up = rnp_up->parent;
+               }
+       }
+}
+
+/*
+ * Reset the ->expmask values in the rcu_node tree in preparation for
+ * a new expedited grace period.
+ */
+static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
+{
+       unsigned long flags;
+       struct rcu_node *rnp;
+
+       sync_exp_reset_tree_hotplug(rsp);
+       rcu_for_each_node_breadth_first(rsp, rnp) {
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+               WARN_ON_ONCE(rnp->expmask);
+               rnp->expmask = rnp->expmaskinit;
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       }
+}
+
+/*
+ * Return non-zero if there is no RCU expedited grace period in progress
+ * for the specified rcu_node structure, in other words, if all CPUs and
+ * tasks covered by the specified rcu_node structure have done their bit
+ * for the current expedited grace period.  Works only for preemptible
+ * RCU -- other RCU implementations use other means.
+ *
+ * Caller must hold the rcu_state's exp_mutex.
+ */
+static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
+{
+       return rnp->exp_tasks == NULL &&
+              READ_ONCE(rnp->expmask) == 0;
+}
+
+/*
+ * Report the exit from RCU read-side critical section for the last task
+ * that queued itself during or before the current expedited preemptible-RCU
+ * grace period.  This event is reported either to the rcu_node structure on
+ * which the task was queued or to one of that rcu_node structure's ancestors,
+ * recursively up the tree.  (Calm down, calm down, we do the recursion
+ * iteratively!)
+ *
+ * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
+ * structure's ->lock.
+ */
+static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+                                bool wake, unsigned long flags)
+       __releases(rnp->lock)
+{
+       unsigned long mask;
+
+       for (;;) {
+               if (!sync_rcu_preempt_exp_done(rnp)) {
+                       if (!rnp->expmask)
+                               rcu_initiate_boost(rnp, flags);
+                       else
+                               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+                       break;
+               }
+               if (rnp->parent == NULL) {
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+                       if (wake) {
+                               smp_mb(); /* EGP done before wake_up(). */
+                               swake_up(&rsp->expedited_wq);
+                       }
+                       break;
+               }
+               mask = rnp->grpmask;
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
+               rnp = rnp->parent;
+               raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
+               WARN_ON_ONCE(!(rnp->expmask & mask));
+               rnp->expmask &= ~mask;
+       }
+}
+
+/*
+ * Report expedited quiescent state for specified node.  This is a
+ * lock-acquisition wrapper function for __rcu_report_exp_rnp().
+ *
+ * Caller must hold the rcu_state's exp_mutex.
+ */
+static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
+                                             struct rcu_node *rnp, bool wake)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       __rcu_report_exp_rnp(rsp, rnp, wake, flags);
+}
+
+/*
+ * Report expedited quiescent state for multiple CPUs, all covered by the
+ * specified leaf rcu_node structure.  Caller must hold the rcu_state's
+ * exp_mutex.
+ */
+static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
+                                   unsigned long mask, bool wake)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave_rcu_node(rnp, flags);
+       if (!(rnp->expmask & mask)) {
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               return;
+       }
+       rnp->expmask &= ~mask;
+       __rcu_report_exp_rnp(rsp, rnp, wake, flags); /* Releases rnp->lock. */
+}
+
+/*
+ * Report expedited quiescent state for specified rcu_data (CPU).
+ */
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
+                              bool wake)
+{
+       rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
+}
+
+/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
+static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
+                              unsigned long s)
+{
+       if (rcu_exp_gp_seq_done(rsp, s)) {
+               trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
+               /* Ensure test happens before caller kfree(). */
+               smp_mb__before_atomic(); /* ^^^ */
+               atomic_long_inc(stat);
+               return true;
+       }
+       return false;
+}
+
+/*
+ * Funnel-lock acquisition for expedited grace periods.  Returns true
+ * if some other task completed an expedited grace period that this task
+ * can piggy-back on, and with no mutex held.  Otherwise, returns false
+ * with the mutex held, indicating that the caller must actually do the
+ * expedited grace period.
+ */
+static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
+{
+       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
+       struct rcu_node *rnp = rdp->mynode;
+       struct rcu_node *rnp_root = rcu_get_root(rsp);
+
+       /* Low-contention fastpath. */
+       if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
+           (rnp == rnp_root ||
+            ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
+           mutex_trylock(&rsp->exp_mutex))
+               goto fastpath;
+
+       /*
+        * Each pass through the following loop works its way up
+        * the rcu_node tree, returning if others have done the work or
+        * otherwise falls through to acquire rsp->exp_mutex.  The mapping
+        * from CPU to rcu_node structure can be inexact, as it is just
+        * promoting locality and is not strictly needed for correctness.
+        */
+       for (; rnp != NULL; rnp = rnp->parent) {
+               if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
+                       return true;
+
+               /* Work not done, either wait here or go up. */
+               spin_lock(&rnp->exp_lock);
+               if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
+
+                       /* Someone else doing GP, so wait for them. */
+                       spin_unlock(&rnp->exp_lock);
+                       trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
+                                                 rnp->grplo, rnp->grphi,
+                                                 TPS("wait"));
+                       wait_event(rnp->exp_wq[(s >> 1) & 0x3],
+                                  sync_exp_work_done(rsp,
+                                                     &rdp->exp_workdone2, s));
+                       return true;
+               }
+               rnp->exp_seq_rq = s; /* Followers can wait on us. */
+               spin_unlock(&rnp->exp_lock);
+               trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
+                                         rnp->grphi, TPS("nxtlvl"));
+       }
+       mutex_lock(&rsp->exp_mutex);
+fastpath:
+       if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
+               mutex_unlock(&rsp->exp_mutex);
+               return true;
+       }
+       rcu_exp_gp_seq_start(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
+       return false;
+}
+
+/* Invoked on each online non-idle CPU for expedited quiescent state. */
+static void sync_sched_exp_handler(void *data)
+{
+       struct rcu_data *rdp;
+       struct rcu_node *rnp;
+       struct rcu_state *rsp = data;
+
+       rdp = this_cpu_ptr(rsp->rda);
+       rnp = rdp->mynode;
+       if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
+           __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
+               return;
+       if (rcu_is_cpu_rrupt_from_idle()) {
+               rcu_report_exp_rdp(&rcu_sched_state,
+                                  this_cpu_ptr(&rcu_sched_data), true);
+               return;
+       }
+       __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
+       resched_cpu(smp_processor_id());
+}
+
+/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
+static void sync_sched_exp_online_cleanup(int cpu)
+{
+       struct rcu_data *rdp;
+       int ret;
+       struct rcu_node *rnp;
+       struct rcu_state *rsp = &rcu_sched_state;
+
+       rdp = per_cpu_ptr(rsp->rda, cpu);
+       rnp = rdp->mynode;
+       if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
+               return;
+       ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
+       WARN_ON_ONCE(ret);
+}
+
+/*
+ * Select the nodes that the upcoming expedited grace period needs
+ * to wait for.
+ */
+static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
+                                    smp_call_func_t func)
+{
+       int cpu;
+       unsigned long flags;
+       unsigned long mask_ofl_test;
+       unsigned long mask_ofl_ipi;
+       int ret;
+       struct rcu_node *rnp;
+
+       sync_exp_reset_tree(rsp);
+       rcu_for_each_leaf_node(rsp, rnp) {
+               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+
+               /* Each pass checks a CPU for identity, offline, and idle. */
+               mask_ofl_test = 0;
+               for_each_leaf_node_possible_cpu(rnp, cpu) {
+                       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+                       struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
+                       if (raw_smp_processor_id() == cpu ||
+                           !(atomic_add_return(0, &rdtp->dynticks) & 0x1))
+                               mask_ofl_test |= rdp->grpmask;
+               }
+               mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;
+
+               /*
+                * Need to wait for any blocked tasks as well.  Note that
+                * additional blocking tasks will also block the expedited
+                * GP until such time as the ->expmask bits are cleared.
+                */
+               if (rcu_preempt_has_tasks(rnp))
+                       rnp->exp_tasks = rnp->blkd_tasks.next;
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+
+               /* IPI the remaining CPUs for expedited quiescent state. */
+               for_each_leaf_node_possible_cpu(rnp, cpu) {
+                       unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
+                       if (!(mask_ofl_ipi & mask))
+                               continue;
+retry_ipi:
+                       ret = smp_call_function_single(cpu, func, rsp, 0);
+                       if (!ret) {
+                               mask_ofl_ipi &= ~mask;
+                               continue;
+                       }
+                       /* Failed, raced with offline. */
+                       raw_spin_lock_irqsave_rcu_node(rnp, flags);
+                       if (cpu_online(cpu) &&
+                           (rnp->expmask & mask)) {
+                               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+                               schedule_timeout_uninterruptible(1);
+                               if (cpu_online(cpu) &&
+                                   (rnp->expmask & mask))
+                                       goto retry_ipi;
+                               raw_spin_lock_irqsave_rcu_node(rnp, flags);
+                       }
+                       if (!(rnp->expmask & mask))
+                               mask_ofl_ipi &= ~mask;
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+               }
+               /* Report quiescent states for those that went offline. */
+               mask_ofl_test |= mask_ofl_ipi;
+               if (mask_ofl_test)
+                       rcu_report_exp_cpu_mult(rsp, rnp, mask_ofl_test, false);
+       }
+}
+
+static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
+{
+       int cpu;
+       unsigned long jiffies_stall;
+       unsigned long jiffies_start;
+       unsigned long mask;
+       int ndetected;
+       struct rcu_node *rnp;
+       struct rcu_node *rnp_root = rcu_get_root(rsp);
+       int ret;
+
+       jiffies_stall = rcu_jiffies_till_stall_check();
+       jiffies_start = jiffies;
+
+       for (;;) {
+               ret = swait_event_timeout(
+                               rsp->expedited_wq,
+                               sync_rcu_preempt_exp_done(rnp_root),
+                               jiffies_stall);
+               if (ret > 0 || sync_rcu_preempt_exp_done(rnp_root))
+                       return;
+               if (ret < 0) {
+                       /* Hit a signal, disable CPU stall warnings. */
+                       swait_event(rsp->expedited_wq,
+                                  sync_rcu_preempt_exp_done(rnp_root));
+                       return;
+               }
+               pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
+                      rsp->name);
+               ndetected = 0;
+               rcu_for_each_leaf_node(rsp, rnp) {
+                       ndetected += rcu_print_task_exp_stall(rnp);
+                       for_each_leaf_node_possible_cpu(rnp, cpu) {
+                               struct rcu_data *rdp;
+
+                               mask = leaf_node_cpu_bit(rnp, cpu);
+                               if (!(rnp->expmask & mask))
+                                       continue;
+                               ndetected++;
+                               rdp = per_cpu_ptr(rsp->rda, cpu);
+                               pr_cont(" %d-%c%c%c", cpu,
+                                       "O."[!!cpu_online(cpu)],
+                                       "o."[!!(rdp->grpmask & rnp->expmaskinit)],
+                                       "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
+                       }
+               }
+               pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
+                       jiffies - jiffies_start, rsp->expedited_sequence,
+                       rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
+               if (ndetected) {
+                       pr_err("blocking rcu_node structures:");
+                       rcu_for_each_node_breadth_first(rsp, rnp) {
+                               if (rnp == rnp_root)
+                                       continue; /* printed unconditionally */
+                               if (sync_rcu_preempt_exp_done(rnp))
+                                       continue;
+                               pr_cont(" l=%u:%d-%d:%#lx/%c",
+                                       rnp->level, rnp->grplo, rnp->grphi,
+                                       rnp->expmask,
+                                       ".T"[!!rnp->exp_tasks]);
+                       }
+                       pr_cont("\n");
+               }
+               rcu_for_each_leaf_node(rsp, rnp) {
+                       for_each_leaf_node_possible_cpu(rnp, cpu) {
+                               mask = leaf_node_cpu_bit(rnp, cpu);
+                               if (!(rnp->expmask & mask))
+                                       continue;
+                               dump_cpu_task(cpu);
+                       }
+               }
+               jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
+       }
+}
+
+/*
+ * Wait for the current expedited grace period to complete, and then
+ * wake up everyone who piggybacked on the just-completed expedited
+ * grace period.  Also update all the ->exp_seq_rq counters as needed
+ * in order to avoid counter-wrap problems.
+ */
+static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
+{
+       struct rcu_node *rnp;
+
+       synchronize_sched_expedited_wait(rsp);
+       rcu_exp_gp_seq_end(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+
+       /*
+        * Switch over to wakeup mode, allowing the next GP, but -only- the
+        * next GP, to proceed.
+        */
+       mutex_lock(&rsp->exp_wake_mutex);
+       mutex_unlock(&rsp->exp_mutex);
+
+       rcu_for_each_node_breadth_first(rsp, rnp) {
+               if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
+                       spin_lock(&rnp->exp_lock);
+                       /* Recheck, avoid hang in case someone just arrived. */
+                       if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
+                               rnp->exp_seq_rq = s;
+                       spin_unlock(&rnp->exp_lock);
+               }
+               wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
+       }
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
+       mutex_unlock(&rsp->exp_wake_mutex);
+}
+
+/**
+ * synchronize_sched_expedited - Brute-force RCU-sched grace period
+ *
+ * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
+ * approach to force the grace period to end quickly.  This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * so is thus not recommended for any sort of common-case code.  In fact,
+ * if you are using synchronize_sched_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_sched() instead.
+ *
+ * This implementation can be thought of as an application of sequence
+ * locking to expedited grace periods, but using the sequence counter to
+ * determine when someone else has already done the work instead of for
+ * retrying readers.
+ */
+void synchronize_sched_expedited(void)
+{
+       unsigned long s;
+       struct rcu_state *rsp = &rcu_sched_state;
+
+       /* If only one CPU, this is automatically a grace period. */
+       if (rcu_blocking_is_gp())
+               return;
+
+       /* If expedited grace periods are prohibited, fall back to normal. */
+       if (rcu_gp_is_normal()) {
+               wait_rcu_gp(call_rcu_sched);
+               return;
+       }
+
+       /* Take a snapshot of the sequence number.  */
+       s = rcu_exp_gp_seq_snap(rsp);
+       if (exp_funnel_lock(rsp, s))
+               return;  /* Someone else did our work for us. */
+
+       /* Initialize the rcu_node tree in preparation for the wait. */
+       sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
+
+       /* Wait and clean up, including waking everyone. */
+       rcu_exp_wait_wake(rsp, s);
+}
+EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+
+#ifdef CONFIG_PREEMPT_RCU
+
+/*
+ * Remote handler for smp_call_function_single().  If there is an
+ * RCU read-side critical section in effect, request that the
+ * next rcu_read_unlock() record the quiescent state up the
+ * ->expmask fields in the rcu_node tree.  Otherwise, immediately
+ * report the quiescent state.
+ */
+static void sync_rcu_exp_handler(void *info)
+{
+       struct rcu_data *rdp;
+       struct rcu_state *rsp = info;
+       struct task_struct *t = current;
+
+       /*
+        * Within an RCU read-side critical section, request that the next
+        * rcu_read_unlock() report.  Unless this RCU read-side critical
+        * section has already blocked, in which case it is already set
+        * up for the expedited grace period to wait on it.
+        */
+       if (t->rcu_read_lock_nesting > 0 &&
+           !t->rcu_read_unlock_special.b.blocked) {
+               t->rcu_read_unlock_special.b.exp_need_qs = true;
+               return;
+       }
+
+       /*
+        * We are either exiting an RCU read-side critical section (negative
+        * values of t->rcu_read_lock_nesting) or are not in one at all
+        * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
+        * read-side critical section that blocked before this expedited
+        * grace period started.  Either way, we can immediately report
+        * the quiescent state.
+        */
+       rdp = this_cpu_ptr(rsp->rda);
+       rcu_report_exp_rdp(rsp, rdp, true);
+}
+
+/**
+ * synchronize_rcu_expedited - Brute-force RCU grace period
+ *
+ * Wait for an RCU-preempt grace period, but expedite it.  The basic
+ * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
+ * checks whether the CPU is in an RCU-preempt critical section, and
+ * if so, it sets a flag that causes the outermost rcu_read_unlock()
+ * to report the quiescent state.  On the other hand, if the CPU is
+ * not in an RCU read-side critical section, the IPI handler reports
+ * the quiescent state immediately.
+ *
+ * Although this is a great improvement over previous expedited
+ * implementations, it is still unfriendly to real-time workloads, so is
+ * thus not recommended for any sort of common-case code.  In fact, if
+ * you are using synchronize_rcu_expedited() in a loop, please restructure
+ * your code to batch your updates, and then use a single synchronize_rcu()
+ * instead.
+ */
+void synchronize_rcu_expedited(void)
+{
+       struct rcu_state *rsp = rcu_state_p;
+       unsigned long s;
+
+       /* If expedited grace periods are prohibited, fall back to normal. */
+       if (rcu_gp_is_normal()) {
+               wait_rcu_gp(call_rcu);
+               return;
+       }
+
+       s = rcu_exp_gp_seq_snap(rsp);
+       if (exp_funnel_lock(rsp, s))
+               return;  /* Someone else did our work for us. */
+
+       /* Initialize the rcu_node tree in preparation for the wait. */
+       sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
+
+       /* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
+       rcu_exp_wait_wake(rsp, s);
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+
+/*
+ * Wait for an rcu-preempt grace period, but make it happen quickly.
+ * But because preemptible RCU does not exist, map to rcu-sched.
+ */
+void synchronize_rcu_expedited(void)
+{
+       synchronize_sched_expedited();
+}
+EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
+
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
index ff1cd4e1188d37784ea83bdb700de183531aa7bd..0082fce402a0c11c922648ae0d90a9d2bd85561f 100644 (file)
@@ -79,8 +79,6 @@ static void __init rcu_bootup_announce_oddness(void)
                pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
        if (IS_ENABLED(CONFIG_PROVE_RCU))
                pr_info("\tRCU lockdep checking is enabled.\n");
-       if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_RUNNABLE))
-               pr_info("\tRCU torture testing starts during boot.\n");
        if (RCU_NUM_LVLS >= 4)
                pr_info("\tFour(or more)-level hierarchy is enabled.\n");
        if (RCU_FANOUT_LEAF != 16)
@@ -681,84 +679,6 @@ void synchronize_rcu(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
-/*
- * Remote handler for smp_call_function_single().  If there is an
- * RCU read-side critical section in effect, request that the
- * next rcu_read_unlock() record the quiescent state up the
- * ->expmask fields in the rcu_node tree.  Otherwise, immediately
- * report the quiescent state.
- */
-static void sync_rcu_exp_handler(void *info)
-{
-       struct rcu_data *rdp;
-       struct rcu_state *rsp = info;
-       struct task_struct *t = current;
-
-       /*
-        * Within an RCU read-side critical section, request that the next
-        * rcu_read_unlock() report.  Unless this RCU read-side critical
-        * section has already blocked, in which case it is already set
-        * up for the expedited grace period to wait on it.
-        */
-       if (t->rcu_read_lock_nesting > 0 &&
-           !t->rcu_read_unlock_special.b.blocked) {
-               t->rcu_read_unlock_special.b.exp_need_qs = true;
-               return;
-       }
-
-       /*
-        * We are either exiting an RCU read-side critical section (negative
-        * values of t->rcu_read_lock_nesting) or are not in one at all
-        * (zero value of t->rcu_read_lock_nesting).  Or we are in an RCU
-        * read-side critical section that blocked before this expedited
-        * grace period started.  Either way, we can immediately report
-        * the quiescent state.
-        */
-       rdp = this_cpu_ptr(rsp->rda);
-       rcu_report_exp_rdp(rsp, rdp, true);
-}
-
-/**
- * synchronize_rcu_expedited - Brute-force RCU grace period
- *
- * Wait for an RCU-preempt grace period, but expedite it.  The basic
- * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
- * checks whether the CPU is in an RCU-preempt critical section, and
- * if so, it sets a flag that causes the outermost rcu_read_unlock()
- * to report the quiescent state.  On the other hand, if the CPU is
- * not in an RCU read-side critical section, the IPI handler reports
- * the quiescent state immediately.
- *
- * Although this is a greate improvement over previous expedited
- * implementations, it is still unfriendly to real-time workloads, so is
- * thus not recommended for any sort of common-case code.  In fact, if
- * you are using synchronize_rcu_expedited() in a loop, please restructure
- * your code to batch your updates, and then Use a single synchronize_rcu()
- * instead.
- */
-void synchronize_rcu_expedited(void)
-{
-       struct rcu_state *rsp = rcu_state_p;
-       unsigned long s;
-
-       /* If expedited grace periods are prohibited, fall back to normal. */
-       if (rcu_gp_is_normal()) {
-               wait_rcu_gp(call_rcu);
-               return;
-       }
-
-       s = rcu_exp_gp_seq_snap(rsp);
-       if (exp_funnel_lock(rsp, s))
-               return;  /* Someone else did our work for us. */
-
-       /* Initialize the rcu_node tree in preparation for the wait. */
-       sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
-
-       /* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
-       rcu_exp_wait_wake(rsp, s);
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
-
 /**
  * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
  *
@@ -882,16 +802,6 @@ static void rcu_preempt_check_callbacks(void)
 {
 }
 
-/*
- * Wait for an rcu-preempt grace period, but make it happen quickly.
- * But because preemptible RCU does not exist, map to rcu-sched.
- */
-void synchronize_rcu_expedited(void)
-{
-       synchronize_sched_expedited();
-}
-EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
-
 /*
  * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
@@ -1254,8 +1164,9 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
                return;
        if (!zalloc_cpumask_var(&cm, GFP_KERNEL))
                return;
-       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
-               if ((mask & 0x1) && cpu != outgoingcpu)
+       for_each_leaf_node_possible_cpu(rnp, cpu)
+               if ((mask & leaf_node_cpu_bit(rnp, cpu)) &&
+                   cpu != outgoingcpu)
                        cpumask_set_cpu(cpu, cm);
        if (cpumask_weight(cm) == 0)
                cpumask_setall(cm);
index 3e888cd5a5941c43dd05f52209f7f5e884a9bf43..f0d8322bc3ec179f7a3cfa2f9bef7a0d8f8cb950 100644 (file)
@@ -528,6 +528,7 @@ static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
 module_param(rcu_task_stall_timeout, int, 0644);
 
 static void rcu_spawn_tasks_kthread(void);
+static struct task_struct *rcu_tasks_kthread_ptr;
 
 /*
  * Post an RCU-tasks callback.  First call must be from process context
@@ -537,6 +538,7 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
 {
        unsigned long flags;
        bool needwake;
+       bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);
 
        rhp->next = NULL;
        rhp->func = func;
@@ -545,7 +547,9 @@ void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
        *rcu_tasks_cbs_tail = rhp;
        rcu_tasks_cbs_tail = &rhp->next;
        raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
-       if (needwake) {
+       /* We can't create the thread unless interrupts are enabled. */
+       if ((needwake && havetask) ||
+           (!havetask && !irqs_disabled_flags(flags))) {
                rcu_spawn_tasks_kthread();
                wake_up(&rcu_tasks_cbs_wq);
        }
@@ -790,7 +794,6 @@ static int __noreturn rcu_tasks_kthread(void *arg)
 static void rcu_spawn_tasks_kthread(void)
 {
        static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
-       static struct task_struct *rcu_tasks_kthread_ptr;
        struct task_struct *t;
 
        if (READ_ONCE(rcu_tasks_kthread_ptr)) {
index 51d7105f529a5527ee8c90ac789f08583027a373..5c883fe8e44016df1109e8f66dd73377dfecb5e9 100644 (file)
@@ -1937,7 +1937,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  * chain to provide order. Instead we do:
  *
  *   1) smp_store_release(X->on_cpu, 0)
- *   2) smp_cond_acquire(!X->on_cpu)
+ *   2) smp_cond_load_acquire(!X->on_cpu)
  *
  * Example:
  *
@@ -1948,7 +1948,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  *   sched-out X
  *   smp_store_release(X->on_cpu, 0);
  *
- *                    smp_cond_acquire(!X->on_cpu);
+ *                    smp_cond_load_acquire(&X->on_cpu, !VAL);
  *                    X->state = WAKING
  *                    set_task_cpu(X,2)
  *
@@ -1974,7 +1974,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
  * This means that any means of doing remote wakeups must order the CPU doing
  * the wakeup against the CPU the task is going to end up running on. This,
  * however, is already required for the regular Program-Order guarantee above,
- * since the waking CPU is the one issueing the ACQUIRE (smp_cond_acquire).
+ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
  *
  */
 
@@ -2047,7 +2047,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
         * This ensures that tasks getting woken will be fully ordered against
         * their previous state and preserve Program Order.
         */
-       smp_cond_acquire(!p->on_cpu);
+       smp_cond_load_acquire(&p->on_cpu, !VAL);
 
        p->sched_contributes_to_load = !!task_contributes_to_load(p);
        p->state = TASK_WAKING;
@@ -2342,11 +2342,11 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 
        __sched_fork(clone_flags, p);
        /*
-        * We mark the process as running here. This guarantees that
+        * We mark the process as NEW here. This guarantees that
         * nobody will actually run it, and a signal or other external
         * event cannot wake it up and insert it on the runqueue either.
         */
-       p->state = TASK_RUNNING;
+       p->state = TASK_NEW;
 
        /*
         * Make sure we do not leak PI boosting priority to the child.
@@ -2383,8 +2383,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
                p->sched_class = &fair_sched_class;
        }
 
-       if (p->sched_class->task_fork)
-               p->sched_class->task_fork(p);
+       init_entity_runnable_average(&p->se);
 
        /*
         * The child is not yet in the pid-hash so no cgroup attach races,
@@ -2394,7 +2393,13 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
         * Silence PROVE_RCU.
         */
        raw_spin_lock_irqsave(&p->pi_lock, flags);
-       set_task_cpu(p, cpu);
+       /*
+        * We're setting the cpu for the first time, we don't migrate,
+        * so use __set_task_cpu().
+        */
+       __set_task_cpu(p, cpu);
+       if (p->sched_class->task_fork)
+               p->sched_class->task_fork(p);
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 #ifdef CONFIG_SCHED_INFO
@@ -2526,16 +2531,18 @@ void wake_up_new_task(struct task_struct *p)
        struct rq_flags rf;
        struct rq *rq;
 
-       /* Initialize new task's runnable average */
-       init_entity_runnable_average(&p->se);
        raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
+       p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
        /*
         * Fork balancing, do it here and not earlier because:
         *  - cpus_allowed can change in the fork path
         *  - any previously selected cpu might disappear through hotplug
+        *
+        * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
+        * as we're not fully set-up yet.
         */
-       set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
+       __set_task_cpu(p, select_task_rq(p, task_cpu(p), SD_BALANCE_FORK, 0));
 #endif
        rq = __task_rq_lock(p, &rf);
        post_init_entity_util_avg(&p->se);
@@ -3161,6 +3168,9 @@ static noinline void __schedule_bug(struct task_struct *prev)
                pr_cont("\n");
        }
 #endif
+       if (panic_on_warn)
+               panic("scheduling while atomic\n");
+
        dump_stack();
        add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 }
@@ -4752,7 +4762,8 @@ out_unlock:
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
  *
- * Return: 0 on success. An error code otherwise.
+ * Return: size of CPU mask copied to user_mask_ptr on success. An
+ * error code otherwise.
  */
 SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
                unsigned long __user *, user_mask_ptr)
@@ -5394,13 +5405,15 @@ void idle_task_exit(void)
 /*
  * Since this CPU is going 'away' for a while, fold any nr_active delta
  * we might have. Assumes we're called after migrate_tasks() so that the
- * nr_active count is stable.
+ * nr_active count is stable. We need to take the teardown thread which
+ * is calling this into account, so we hand in adjust = 1 to the load
+ * calculation.
  *
  * Also see the comment "Global load-average calculations".
  */
 static void calc_load_migrate(struct rq *rq)
 {
-       long delta = calc_load_fold_active(rq);
+       long delta = calc_load_fold_active(rq, 1);
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 }
@@ -7231,7 +7244,6 @@ static void sched_rq_cpu_starting(unsigned int cpu)
        struct rq *rq = cpu_rq(cpu);
 
        rq->calc_load_update = calc_load_update;
-       account_reset_rq(rq);
        update_max_interval();
 }
 
@@ -7711,6 +7723,8 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
        INIT_LIST_HEAD(&tg->children);
        list_add_rcu(&tg->siblings, &parent->children);
        spin_unlock_irqrestore(&task_group_lock, flags);
+
+       online_fair_sched_group(tg);
 }
 
 /* rcu callback to free various structures associated with a task group */
@@ -7739,27 +7753,9 @@ void sched_offline_group(struct task_group *tg)
        spin_unlock_irqrestore(&task_group_lock, flags);
 }
 
-/* change task's runqueue when it moves between groups.
- *     The caller of this function should have put the task in its new group
- *     by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
- *     reflect its new group.
- */
-void sched_move_task(struct task_struct *tsk)
+static void sched_change_group(struct task_struct *tsk, int type)
 {
        struct task_group *tg;
-       int queued, running;
-       struct rq_flags rf;
-       struct rq *rq;
-
-       rq = task_rq_lock(tsk, &rf);
-
-       running = task_current(rq, tsk);
-       queued = task_on_rq_queued(tsk);
-
-       if (queued)
-               dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
-       if (unlikely(running))
-               put_prev_task(rq, tsk);
 
        /*
         * All callers are synchronized by task_rq_lock(); we do not use RCU
@@ -7772,11 +7768,37 @@ void sched_move_task(struct task_struct *tsk)
        tsk->sched_task_group = tg;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-       if (tsk->sched_class->task_move_group)
-               tsk->sched_class->task_move_group(tsk);
+       if (tsk->sched_class->task_change_group)
+               tsk->sched_class->task_change_group(tsk, type);
        else
 #endif
                set_task_rq(tsk, task_cpu(tsk));
+}
+
+/*
+ * Change task's runqueue when it moves between groups.
+ *
+ * The caller of this function should have put the task in its new group by
+ * now. This function just updates tsk->se.cfs_rq and tsk->se.parent to reflect
+ * its new group.
+ */
+void sched_move_task(struct task_struct *tsk)
+{
+       int queued, running;
+       struct rq_flags rf;
+       struct rq *rq;
+
+       rq = task_rq_lock(tsk, &rf);
+
+       running = task_current(rq, tsk);
+       queued = task_on_rq_queued(tsk);
+
+       if (queued)
+               dequeue_task(rq, tsk, DEQUEUE_SAVE | DEQUEUE_MOVE);
+       if (unlikely(running))
+               put_prev_task(rq, tsk);
+
+       sched_change_group(tsk, TASK_MOVE_GROUP);
 
        if (unlikely(running))
                tsk->sched_class->set_curr_task(rq);
@@ -8204,15 +8226,27 @@ static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
        sched_free_group(tg);
 }
 
+/*
+ * This is called before wake_up_new_task(), therefore we really only
+ * have to set its group bits, all the other stuff does not apply.
+ */
 static void cpu_cgroup_fork(struct task_struct *task)
 {
-       sched_move_task(task);
+       struct rq_flags rf;
+       struct rq *rq;
+
+       rq = task_rq_lock(task, &rf);
+
+       sched_change_group(task, TASK_SET_GROUP);
+
+       task_rq_unlock(rq, task, &rf);
 }
 
 static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
        struct task_struct *task;
        struct cgroup_subsys_state *css;
+       int ret = 0;
 
        cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
@@ -8223,8 +8257,24 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
                if (task->sched_class != &fair_sched_class)
                        return -EINVAL;
 #endif
+               /*
+                * Serialize against wake_up_new_task() such that if its
+                * running, we're sure to observe its full state.
+                */
+               raw_spin_lock_irq(&task->pi_lock);
+               /*
+                * Avoid calling sched_move_task() before wake_up_new_task()
+                * has happened. This would lead to problems with PELT, due to
+                * move wanting to detach+attach while we're not attached yet.
+                */
+               if (task->state == TASK_NEW)
+                       ret = -EINVAL;
+               raw_spin_unlock_irq(&task->pi_lock);
+
+               if (ret)
+                       break;
        }
-       return 0;
+       return ret;
 }
 
 static void cpu_cgroup_attach(struct cgroup_taskset *tset)
index 41f85c4d09387a8bd03299ef00f6e79482f68b40..bc0b309c3f19e2ce7e07b2d0419d9157d423497a 100644 (file)
@@ -25,15 +25,13 @@ enum cpuacct_stat_index {
        CPUACCT_STAT_NSTATS,
 };
 
-enum cpuacct_usage_index {
-       CPUACCT_USAGE_USER,     /* ... user mode */
-       CPUACCT_USAGE_SYSTEM,   /* ... kernel mode */
-
-       CPUACCT_USAGE_NRUSAGE,
+static const char * const cpuacct_stat_desc[] = {
+       [CPUACCT_STAT_USER] = "user",
+       [CPUACCT_STAT_SYSTEM] = "system",
 };
 
 struct cpuacct_usage {
-       u64     usages[CPUACCT_USAGE_NRUSAGE];
+       u64     usages[CPUACCT_STAT_NSTATS];
 };
 
 /* track cpu usage of a group of tasks and its child groups */
@@ -108,16 +106,16 @@ static void cpuacct_css_free(struct cgroup_subsys_state *css)
 }
 
 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
-                                enum cpuacct_usage_index index)
+                                enum cpuacct_stat_index index)
 {
        struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        u64 data;
 
        /*
-        * We allow index == CPUACCT_USAGE_NRUSAGE here to read
+        * We allow index == CPUACCT_STAT_NSTATS here to read
         * the sum of usages.
         */
-       BUG_ON(index > CPUACCT_USAGE_NRUSAGE);
+       BUG_ON(index > CPUACCT_STAT_NSTATS);
 
 #ifndef CONFIG_64BIT
        /*
@@ -126,11 +124,11 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
 #endif
 
-       if (index == CPUACCT_USAGE_NRUSAGE) {
+       if (index == CPUACCT_STAT_NSTATS) {
                int i = 0;
 
                data = 0;
-               for (i = 0; i < CPUACCT_USAGE_NRUSAGE; i++)
+               for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
                        data += cpuusage->usages[i];
        } else {
                data = cpuusage->usages[index];
@@ -155,7 +153,7 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
        raw_spin_lock_irq(&cpu_rq(cpu)->lock);
 #endif
 
-       for (i = 0; i < CPUACCT_USAGE_NRUSAGE; i++)
+       for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
                cpuusage->usages[i] = val;
 
 #ifndef CONFIG_64BIT
@@ -165,7 +163,7 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 
 /* return total cpu usage (in nanoseconds) of a group */
 static u64 __cpuusage_read(struct cgroup_subsys_state *css,
-                          enum cpuacct_usage_index index)
+                          enum cpuacct_stat_index index)
 {
        struct cpuacct *ca = css_ca(css);
        u64 totalcpuusage = 0;
@@ -180,18 +178,18 @@ static u64 __cpuusage_read(struct cgroup_subsys_state *css,
 static u64 cpuusage_user_read(struct cgroup_subsys_state *css,
                              struct cftype *cft)
 {
-       return __cpuusage_read(css, CPUACCT_USAGE_USER);
+       return __cpuusage_read(css, CPUACCT_STAT_USER);
 }
 
 static u64 cpuusage_sys_read(struct cgroup_subsys_state *css,
                             struct cftype *cft)
 {
-       return __cpuusage_read(css, CPUACCT_USAGE_SYSTEM);
+       return __cpuusage_read(css, CPUACCT_STAT_SYSTEM);
 }
 
 static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
 {
-       return __cpuusage_read(css, CPUACCT_USAGE_NRUSAGE);
+       return __cpuusage_read(css, CPUACCT_STAT_NSTATS);
 }
 
 static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
@@ -213,7 +211,7 @@ static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
 }
 
 static int __cpuacct_percpu_seq_show(struct seq_file *m,
-                                    enum cpuacct_usage_index index)
+                                    enum cpuacct_stat_index index)
 {
        struct cpuacct *ca = css_ca(seq_css(m));
        u64 percpu;
@@ -229,48 +227,78 @@ static int __cpuacct_percpu_seq_show(struct seq_file *m,
 
 static int cpuacct_percpu_user_seq_show(struct seq_file *m, void *V)
 {
-       return __cpuacct_percpu_seq_show(m, CPUACCT_USAGE_USER);
+       return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_USER);
 }
 
 static int cpuacct_percpu_sys_seq_show(struct seq_file *m, void *V)
 {
-       return __cpuacct_percpu_seq_show(m, CPUACCT_USAGE_SYSTEM);
+       return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_SYSTEM);
 }
 
 static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
 {
-       return __cpuacct_percpu_seq_show(m, CPUACCT_USAGE_NRUSAGE);
+       return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_NSTATS);
 }
 
-static const char * const cpuacct_stat_desc[] = {
-       [CPUACCT_STAT_USER] = "user",
-       [CPUACCT_STAT_SYSTEM] = "system",
-};
+static int cpuacct_all_seq_show(struct seq_file *m, void *V)
+{
+       struct cpuacct *ca = css_ca(seq_css(m));
+       int index;
+       int cpu;
+
+       seq_puts(m, "cpu");
+       for (index = 0; index < CPUACCT_STAT_NSTATS; index++)
+               seq_printf(m, " %s", cpuacct_stat_desc[index]);
+       seq_puts(m, "\n");
+
+       for_each_possible_cpu(cpu) {
+               struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
+
+               seq_printf(m, "%d", cpu);
+
+               for (index = 0; index < CPUACCT_STAT_NSTATS; index++) {
+#ifndef CONFIG_64BIT
+                       /*
+                        * Take rq->lock to make 64-bit read safe on 32-bit
+                        * platforms.
+                        */
+                       raw_spin_lock_irq(&cpu_rq(cpu)->lock);
+#endif
+
+                       seq_printf(m, " %llu", cpuusage->usages[index]);
+
+#ifndef CONFIG_64BIT
+                       raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
+#endif
+               }
+               seq_puts(m, "\n");
+       }
+       return 0;
+}
 
 static int cpuacct_stats_show(struct seq_file *sf, void *v)
 {
        struct cpuacct *ca = css_ca(seq_css(sf));
+       s64 val[CPUACCT_STAT_NSTATS];
        int cpu;
-       s64 val = 0;
+       int stat;
 
+       memset(val, 0, sizeof(val));
        for_each_possible_cpu(cpu) {
-               struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
-               val += kcpustat->cpustat[CPUTIME_USER];
-               val += kcpustat->cpustat[CPUTIME_NICE];
-       }
-       val = cputime64_to_clock_t(val);
-       seq_printf(sf, "%s %lld\n", cpuacct_stat_desc[CPUACCT_STAT_USER], val);
+               u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
 
-       val = 0;
-       for_each_possible_cpu(cpu) {
-               struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu);
-               val += kcpustat->cpustat[CPUTIME_SYSTEM];
-               val += kcpustat->cpustat[CPUTIME_IRQ];
-               val += kcpustat->cpustat[CPUTIME_SOFTIRQ];
+               val[CPUACCT_STAT_USER]   += cpustat[CPUTIME_USER];
+               val[CPUACCT_STAT_USER]   += cpustat[CPUTIME_NICE];
+               val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SYSTEM];
+               val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_IRQ];
+               val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SOFTIRQ];
        }
 
-       val = cputime64_to_clock_t(val);
-       seq_printf(sf, "%s %lld\n", cpuacct_stat_desc[CPUACCT_STAT_SYSTEM], val);
+       for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
+               seq_printf(sf, "%s %lld\n",
+                          cpuacct_stat_desc[stat],
+                          cputime64_to_clock_t(val[stat]));
+       }
 
        return 0;
 }
@@ -301,6 +329,10 @@ static struct cftype files[] = {
                .name = "usage_percpu_sys",
                .seq_show = cpuacct_percpu_sys_seq_show,
        },
+       {
+               .name = "usage_all",
+               .seq_show = cpuacct_all_seq_show,
+       },
        {
                .name = "stat",
                .seq_show = cpuacct_stats_show,
@@ -316,11 +348,11 @@ static struct cftype files[] = {
 void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 {
        struct cpuacct *ca;
-       int index = CPUACCT_USAGE_SYSTEM;
+       int index = CPUACCT_STAT_SYSTEM;
        struct pt_regs *regs = task_pt_regs(tsk);
 
        if (regs && user_mode(regs))
-               index = CPUACCT_USAGE_USER;
+               index = CPUACCT_STAT_USER;
 
        rcu_read_lock();
 
index 75f98c5498d55d38b0a36bba2719907742166fe1..ea0f6f31a2449440e502ee029eb2f3cfe2328c1b 100644 (file)
@@ -49,15 +49,12 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq);
  */
 void irqtime_account_irq(struct task_struct *curr)
 {
-       unsigned long flags;
        s64 delta;
        int cpu;
 
        if (!sched_clock_irqtime)
                return;
 
-       local_irq_save(flags);
-
        cpu = smp_processor_id();
        delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
        __this_cpu_add(irq_start_time, delta);
@@ -75,44 +72,53 @@ void irqtime_account_irq(struct task_struct *curr)
                __this_cpu_add(cpu_softirq_time, delta);
 
        irq_time_write_end();
-       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
 
-static int irqtime_account_hi_update(void)
+static cputime_t irqtime_account_hi_update(cputime_t maxtime)
 {
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
-       u64 latest_ns;
-       int ret = 0;
+       cputime_t irq_cputime;
 
        local_irq_save(flags);
-       latest_ns = this_cpu_read(cpu_hardirq_time);
-       if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
-               ret = 1;
+       irq_cputime = nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time)) -
+                     cpustat[CPUTIME_IRQ];
+       irq_cputime = min(irq_cputime, maxtime);
+       cpustat[CPUTIME_IRQ] += irq_cputime;
        local_irq_restore(flags);
-       return ret;
+       return irq_cputime;
 }
 
-static int irqtime_account_si_update(void)
+static cputime_t irqtime_account_si_update(cputime_t maxtime)
 {
        u64 *cpustat = kcpustat_this_cpu->cpustat;
        unsigned long flags;
-       u64 latest_ns;
-       int ret = 0;
+       cputime_t softirq_cputime;
 
        local_irq_save(flags);
-       latest_ns = this_cpu_read(cpu_softirq_time);
-       if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
-               ret = 1;
+       softirq_cputime = nsecs_to_cputime64(this_cpu_read(cpu_softirq_time)) -
+                         cpustat[CPUTIME_SOFTIRQ];
+       softirq_cputime = min(softirq_cputime, maxtime);
+       cpustat[CPUTIME_SOFTIRQ] += softirq_cputime;
        local_irq_restore(flags);
-       return ret;
+       return softirq_cputime;
 }
 
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #define sched_clock_irqtime    (0)
 
+static cputime_t irqtime_account_hi_update(cputime_t dummy)
+{
+       return 0;
+}
+
+static cputime_t irqtime_account_si_update(cputime_t dummy)
+{
+       return 0;
+}
+
 #endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
 
 static inline void task_group_account_field(struct task_struct *p, int index,
@@ -257,29 +263,42 @@ void account_idle_time(cputime_t cputime)
                cpustat[CPUTIME_IDLE] += (__force u64) cputime;
 }
 
-static __always_inline bool steal_account_process_tick(void)
+static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
 {
 #ifdef CONFIG_PARAVIRT
        if (static_key_false(&paravirt_steal_enabled)) {
+               cputime_t steal_cputime;
                u64 steal;
-               unsigned long steal_jiffies;
 
                steal = paravirt_steal_clock(smp_processor_id());
                steal -= this_rq()->prev_steal_time;
 
-               /*
-                * steal is in nsecs but our caller is expecting steal
-                * time in jiffies. Lets cast the result to jiffies
-                * granularity and account the rest on the next rounds.
-                */
-               steal_jiffies = nsecs_to_jiffies(steal);
-               this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
+               steal_cputime = min(nsecs_to_cputime(steal), maxtime);
+               account_steal_time(steal_cputime);
+               this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime);
 
-               account_steal_time(jiffies_to_cputime(steal_jiffies));
-               return steal_jiffies;
+               return steal_cputime;
        }
 #endif
-       return false;
+       return 0;
+}
+
+/*
+ * Account how much elapsed time was spent in steal, irq, or softirq time.
+ */
+static inline cputime_t account_other_time(cputime_t max)
+{
+       cputime_t accounted;
+
+       accounted = steal_account_process_time(max);
+
+       if (accounted < max)
+               accounted += irqtime_account_hi_update(max - accounted);
+
+       if (accounted < max)
+               accounted += irqtime_account_si_update(max - accounted);
+
+       return accounted;
 }
 
 /*
@@ -342,21 +361,23 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                         struct rq *rq, int ticks)
 {
-       cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
-       u64 cputime = (__force u64) cputime_one_jiffy;
-       u64 *cpustat = kcpustat_this_cpu->cpustat;
+       u64 cputime = (__force u64) cputime_one_jiffy * ticks;
+       cputime_t scaled, other;
 
-       if (steal_account_process_tick())
+       /*
+        * When returning from idle, many ticks can get accounted at
+        * once, including some ticks of steal, irq, and softirq time.
+        * Subtract those ticks from the amount of time accounted to
+        * idle, or potentially user or system time. Due to rounding,
+        * other time can exceed ticks occasionally.
+        */
+       other = account_other_time(cputime);
+       if (other >= cputime)
                return;
+       cputime -= other;
+       scaled = cputime_to_scaled(cputime);
 
-       cputime *= ticks;
-       scaled *= ticks;
-
-       if (irqtime_account_hi_update()) {
-               cpustat[CPUTIME_IRQ] += cputime;
-       } else if (irqtime_account_si_update()) {
-               cpustat[CPUTIME_SOFTIRQ] += cputime;
-       } else if (this_cpu_ksoftirqd() == p) {
+       if (this_cpu_ksoftirqd() == p) {
                /*
                 * ksoftirqd time do not get accounted in cpu_softirq_time.
                 * So, we have to handle it separately here.
@@ -406,6 +427,10 @@ void vtime_common_task_switch(struct task_struct *prev)
 }
 #endif
 
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Archs that account the whole time spent in the idle task
  * (outside irq) as idle time can rely on this and just implement
@@ -415,33 +440,16 @@ void vtime_common_task_switch(struct task_struct *prev)
  * vtime_account().
  */
 #ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_common_account_irq_enter(struct task_struct *tsk)
+void vtime_account_irq_enter(struct task_struct *tsk)
 {
-       if (!in_interrupt()) {
-               /*
-                * If we interrupted user, context_tracking_in_user()
-                * is 1 because the context tracking don't hook
-                * on irq entry/exit. This way we know if
-                * we need to flush user time on kernel entry.
-                */
-               if (context_tracking_in_user()) {
-                       vtime_account_user(tsk);
-                       return;
-               }
-
-               if (is_idle_task(tsk)) {
-                       vtime_account_idle(tsk);
-                       return;
-               }
-       }
-       vtime_account_system(tsk);
+       if (!in_interrupt() && is_idle_task(tsk))
+               vtime_account_idle(tsk);
+       else
+               vtime_account_system(tsk);
 }
-EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
-
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
        *ut = p->utime;
@@ -466,7 +474,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-       cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+       cputime_t cputime, scaled, steal;
        struct rq *rq = this_rq();
 
        if (vtime_accounting_cpu_enabled())
@@ -477,16 +485,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
                return;
        }
 
-       if (steal_account_process_tick())
+       cputime = cputime_one_jiffy;
+       steal = steal_account_process_time(cputime);
+
+       if (steal >= cputime)
                return;
 
+       cputime -= steal;
+       scaled = cputime_to_scaled(cputime);
+
        if (user_tick)
-               account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+               account_user_time(p, cputime, scaled);
        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-               account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
-                                   one_jiffy_scaled);
+               account_system_time(p, HARDIRQ_OFFSET, cputime, scaled);
        else
-               account_idle_time(cputime_one_jiffy);
+               account_idle_time(cputime);
 }
 
 /*
@@ -681,12 +694,14 @@ static cputime_t vtime_delta(struct task_struct *tsk)
 static cputime_t get_vtime_delta(struct task_struct *tsk)
 {
        unsigned long now = READ_ONCE(jiffies);
-       unsigned long delta = now - tsk->vtime_snap;
+       cputime_t delta, other;
 
+       delta = jiffies_to_cputime(now - tsk->vtime_snap);
+       other = account_other_time(delta);
        WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
        tsk->vtime_snap = now;
 
-       return jiffies_to_cputime(delta);
+       return delta - other;
 }
 
 static void __vtime_account_system(struct task_struct *tsk)
@@ -706,16 +721,6 @@ void vtime_account_system(struct task_struct *tsk)
        write_seqcount_end(&tsk->vtime_seqcount);
 }
 
-void vtime_gen_account_irq_exit(struct task_struct *tsk)
-{
-       write_seqcount_begin(&tsk->vtime_seqcount);
-       if (vtime_delta(tsk))
-               __vtime_account_system(tsk);
-       if (context_tracking_in_user())
-               tsk->vtime_snap_whence = VTIME_USER;
-       write_seqcount_end(&tsk->vtime_seqcount);
-}
-
 void vtime_account_user(struct task_struct *tsk)
 {
        cputime_t delta_cpu;
index 0368c393a3362d981e79745716cbb59b2989dac8..2a0a9995256d9e920d3c94cf944cd6acb61fa627 100644 (file)
@@ -879,9 +879,9 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
        nr_switches = p->nvcsw + p->nivcsw;
 
-#ifdef CONFIG_SCHEDSTATS
        P(se.nr_migrations);
 
+#ifdef CONFIG_SCHEDSTATS
        if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;
 
index bdcbeea90c950523b5276cfd1d8464ceef2d4fd8..4088eedea7637859844c777dfa56dfb23136c142 100644 (file)
@@ -690,6 +690,11 @@ void init_entity_runnable_average(struct sched_entity *se)
        /* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
 }
 
+static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
+static int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq);
+static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force);
+static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
+
 /*
  * With new tasks being created, their initial util_avgs are extrapolated
  * based on the cfs_rq's current util_avg:
@@ -720,6 +725,8 @@ void post_init_entity_util_avg(struct sched_entity *se)
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        struct sched_avg *sa = &se->avg;
        long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
+       u64 now = cfs_rq_clock_task(cfs_rq);
+       int tg_update;
 
        if (cap > 0) {
                if (cfs_rq->avg.util_avg != 0) {
@@ -733,18 +740,42 @@ void post_init_entity_util_avg(struct sched_entity *se)
                }
                sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
        }
+
+       if (entity_is_task(se)) {
+               struct task_struct *p = task_of(se);
+               if (p->sched_class != &fair_sched_class) {
+                       /*
+                        * For !fair tasks do:
+                        *
+                       update_cfs_rq_load_avg(now, cfs_rq, false);
+                       attach_entity_load_avg(cfs_rq, se);
+                       switched_from_fair(rq, p);
+                        *
+                        * such that the next switched_to_fair() has the
+                        * expected state.
+                        */
+                       se->avg.last_update_time = now;
+                       return;
+               }
+       }
+
+       tg_update = update_cfs_rq_load_avg(now, cfs_rq, false);
+       attach_entity_load_avg(cfs_rq, se);
+       if (tg_update)
+               update_tg_load_avg(cfs_rq, false);
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
-#else
+#else /* !CONFIG_SMP */
 void init_entity_runnable_average(struct sched_entity *se)
 {
 }
 void post_init_entity_util_avg(struct sched_entity *se)
 {
 }
-#endif
+static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+{
+}
+#endif /* CONFIG_SMP */
 
 /*
  * Update the current task's runtime statistics.
@@ -1305,6 +1336,8 @@ static void task_numa_assign(struct task_numa_env *env,
 {
        if (env->best_task)
                put_task_struct(env->best_task);
+       if (p)
+               get_task_struct(p);
 
        env->best_task = p;
        env->best_imp = imp;
@@ -1372,31 +1405,11 @@ static void task_numa_compare(struct task_numa_env *env,
        long imp = env->p->numa_group ? groupimp : taskimp;
        long moveimp = imp;
        int dist = env->dist;
-       bool assigned = false;
 
        rcu_read_lock();
-
-       raw_spin_lock_irq(&dst_rq->lock);
-       cur = dst_rq->curr;
-       /*
-        * No need to move the exiting task or idle task.
-        */
-       if ((cur->flags & PF_EXITING) || is_idle_task(cur))
+       cur = task_rcu_dereference(&dst_rq->curr);
+       if (cur && ((cur->flags & PF_EXITING) || is_idle_task(cur)))
                cur = NULL;
-       else {
-               /*
-                * The task_struct must be protected here to protect the
-                * p->numa_faults access in the task_weight since the
-                * numa_faults could already be freed in the following path:
-                * finish_task_switch()
-                *     --> put_task_struct()
-                *         --> __put_task_struct()
-                *             --> task_numa_free()
-                */
-               get_task_struct(cur);
-       }
-
-       raw_spin_unlock_irq(&dst_rq->lock);
 
        /*
         * Because we have preemption enabled we can get migrated around and
@@ -1479,7 +1492,6 @@ balance:
                 */
                if (!load_too_imbalanced(src_load, dst_load, env)) {
                        imp = moveimp - 1;
-                       put_task_struct(cur);
                        cur = NULL;
                        goto assign;
                }
@@ -1505,16 +1517,9 @@ balance:
                env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
 
 assign:
-       assigned = true;
        task_numa_assign(env, cur, imp);
 unlock:
        rcu_read_unlock();
-       /*
-        * The dst_rq->curr isn't assigned. The protection for task_struct is
-        * finished.
-        */
-       if (cur && !assigned)
-               put_task_struct(cur);
 }
 
 static void task_numa_find_cpu(struct task_numa_env *env,
@@ -2499,28 +2504,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-       long tg_weight;
+       long tg_weight, load, shares;
 
        /*
-        * Use this CPU's real-time load instead of the last load contribution
-        * as the updating of the contribution is delayed, and we will use the
-        * the real-time load to calc the share. See update_tg_load_avg().
+        * This really should be: cfs_rq->avg.load_avg, but instead we use
+        * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+        * the shares for small weight interactive tasks.
         */
-       tg_weight = atomic_long_read(&tg->load_avg);
-       tg_weight -= cfs_rq->tg_load_avg_contrib;
-       tg_weight += cfs_rq->load.weight;
-
-       return tg_weight;
-}
+       load = scale_load_down(cfs_rq->load.weight);
 
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-       long tg_weight, load, shares;
+       tg_weight = atomic_long_read(&tg->load_avg);
 
-       tg_weight = calc_tg_weight(tg, cfs_rq);
-       load = cfs_rq->load.weight;
+       /* Ensure tg_weight >= load */
+       tg_weight -= cfs_rq->tg_load_avg_contrib;
+       tg_weight += load;
 
        shares = (tg->shares * load);
        if (tg_weight)
@@ -2539,6 +2538,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
        return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
 {
@@ -2873,8 +2873,6 @@ void set_task_rq_fair(struct sched_entity *se,
 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
-
 static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
 {
        struct rq *rq = rq_of(cfs_rq);
@@ -2921,7 +2919,23 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
        WRITE_ONCE(*ptr, res);                                  \
 } while (0)
 
-/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
+/**
+ * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
+ * @now: current time, as per cfs_rq_clock_task()
+ * @cfs_rq: cfs_rq to update
+ * @update_freq: should we call cfs_rq_util_change() or will the call do so
+ *
+ * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
+ * avg. The immediate corollary is that all (fair) tasks must be attached, see
+ * post_init_entity_util_avg().
+ *
+ * cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
+ *
+ * Returns true if the load decayed or we removed utilization. It is expected
+ * that one calls update_tg_load_avg() on this condition, but after you've
+ * modified the cfs_rq avg (attach/detach), such that we propagate the new
+ * avg up.
+ */
 static inline int
 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
@@ -2976,6 +2990,14 @@ static inline void update_load_avg(struct sched_entity *se, int update_tg)
                update_tg_load_avg(cfs_rq, 0);
 }
 
+/**
+ * attach_entity_load_avg - attach this entity to its cfs_rq load avg
+ * @cfs_rq: cfs_rq to attach to
+ * @se: sched_entity to attach
+ *
+ * Must call update_cfs_rq_load_avg() before this, since we rely on
+ * cfs_rq->avg.last_update_time being current.
+ */
 static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        if (!sched_feat(ATTACH_AGE_LOAD))
@@ -2984,6 +3006,8 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
        /*
         * If we got migrated (either between CPUs or between cgroups) we'll
         * have aged the average right before clearing @last_update_time.
+        *
+        * Or we're fresh through post_init_entity_util_avg().
         */
        if (se->avg.last_update_time) {
                __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
@@ -3005,6 +3029,14 @@ skip_aging:
        cfs_rq_util_change(cfs_rq);
 }
 
+/**
+ * detach_entity_load_avg - detach this entity from its cfs_rq load avg
+ * @cfs_rq: cfs_rq to detach from
+ * @se: sched_entity to detach
+ *
+ * Must call update_cfs_rq_load_avg() before this, since we rely on
+ * cfs_rq->avg.last_update_time being current.
+ */
 static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
@@ -3089,11 +3121,14 @@ void remove_entity_load_avg(struct sched_entity *se)
        u64 last_update_time;
 
        /*
-        * Newly created task or never used group entity should not be removed
-        * from its (source) cfs_rq
+        * tasks cannot exit without having gone through wake_up_new_task() ->
+        * post_init_entity_util_avg() which will have added things to the
+        * cfs_rq, so we can remove unconditionally.
+        *
+        * Similarly for groups, they will have passed through
+        * post_init_entity_util_avg() before unregister_sched_fair_group()
+        * calls this.
         */
-       if (se->avg.last_update_time == 0)
-               return;
 
        last_update_time = cfs_rq_last_update_time(cfs_rq);
 
@@ -3116,6 +3151,12 @@ static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
 
+static inline int
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
+{
+       return 0;
+}
+
 static inline void update_load_avg(struct sched_entity *se, int not_used)
 {
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -3705,7 +3746,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 {
        if (unlikely(cfs_rq->throttle_count))
-               return cfs_rq->throttled_clock_task;
+               return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;
 
        return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
 }
@@ -3843,13 +3884,11 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
        struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
 
        cfs_rq->throttle_count--;
-#ifdef CONFIG_SMP
        if (!cfs_rq->throttle_count) {
                /* adjust cfs_rq_clock_task() */
                cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
                                             cfs_rq->throttled_clock_task;
        }
-#endif
 
        return 0;
 }
@@ -4202,26 +4241,6 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
        if (!cfs_bandwidth_used())
                return;
 
-       /* Synchronize hierarchical throttle counter: */
-       if (unlikely(!cfs_rq->throttle_uptodate)) {
-               struct rq *rq = rq_of(cfs_rq);
-               struct cfs_rq *pcfs_rq;
-               struct task_group *tg;
-
-               cfs_rq->throttle_uptodate = 1;
-
-               /* Get closest up-to-date node, because leaves go first: */
-               for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
-                       pcfs_rq = tg->cfs_rq[cpu_of(rq)];
-                       if (pcfs_rq->throttle_uptodate)
-                               break;
-               }
-               if (tg) {
-                       cfs_rq->throttle_count = pcfs_rq->throttle_count;
-                       cfs_rq->throttled_clock_task = rq_clock_task(rq);
-               }
-       }
-
        /* an active group must be handled by the update_curr()->put() path */
        if (!cfs_rq->runtime_enabled || cfs_rq->curr)
                return;
@@ -4236,6 +4255,23 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
                throttle_cfs_rq(cfs_rq);
 }
 
+static void sync_throttle(struct task_group *tg, int cpu)
+{
+       struct cfs_rq *pcfs_rq, *cfs_rq;
+
+       if (!cfs_bandwidth_used())
+               return;
+
+       if (!tg->parent)
+               return;
+
+       cfs_rq = tg->cfs_rq[cpu];
+       pcfs_rq = tg->parent->cfs_rq[cpu];
+
+       cfs_rq->throttle_count = pcfs_rq->throttle_count;
+       pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+}
+
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4375,6 +4411,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
+static inline void sync_throttle(struct task_group *tg, int cpu) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
@@ -4483,7 +4520,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 *
                 * note: in the case of encountering a throttled cfs_rq we will
                 * post the final h_nr_running increment below.
-               */
+                */
                if (cfs_rq_throttled(cfs_rq))
                        break;
                cfs_rq->h_nr_running++;
@@ -4946,19 +4983,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                return wl;
 
        for_each_sched_entity(se) {
-               long w, W;
+               struct cfs_rq *cfs_rq = se->my_q;
+               long W, w = cfs_rq_load_avg(cfs_rq);
 
-               tg = se->my_q->tg;
+               tg = cfs_rq->tg;
 
                /*
                 * W = @wg + \Sum rw_j
                 */
-               W = wg + calc_tg_weight(tg, se->my_q);
+               W = wg + atomic_long_read(&tg->load_avg);
+
+               /* Ensure \Sum rw_j >= rw_i */
+               W -= cfs_rq->tg_load_avg_contrib;
+               W += w;
 
                /*
                 * w = rw_i + @wl
                 */
-               w = cfs_rq_load_avg(se->my_q) + wl;
+               w += wl;
 
                /*
                 * wl = S * s'_i; see (2)
@@ -8319,31 +8361,17 @@ static void task_fork_fair(struct task_struct *p)
 {
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se, *curr;
-       int this_cpu = smp_processor_id();
        struct rq *rq = this_rq();
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&rq->lock, flags);
 
+       raw_spin_lock(&rq->lock);
        update_rq_clock(rq);
 
        cfs_rq = task_cfs_rq(current);
        curr = cfs_rq->curr;
-
-       /*
-        * Not only the cpu but also the task_group of the parent might have
-        * been changed after parent->se.parent,cfs_rq were copied to
-        * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
-        * of child point to valid ones.
-        */
-       rcu_read_lock();
-       __set_task_cpu(p, this_cpu);
-       rcu_read_unlock();
-
-       update_curr(cfs_rq);
-
-       if (curr)
+       if (curr) {
+               update_curr(cfs_rq);
                se->vruntime = curr->vruntime;
+       }
        place_entity(cfs_rq, se, 1);
 
        if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
@@ -8356,8 +8384,7 @@ static void task_fork_fair(struct task_struct *p)
        }
 
        se->vruntime -= cfs_rq->min_vruntime;
-
-       raw_spin_unlock_irqrestore(&rq->lock, flags);
+       raw_spin_unlock(&rq->lock);
 }
 
 /*
@@ -8413,6 +8440,8 @@ static void detach_task_cfs_rq(struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       u64 now = cfs_rq_clock_task(cfs_rq);
+       int tg_update;
 
        if (!vruntime_normalized(p)) {
                /*
@@ -8424,13 +8453,18 @@ static void detach_task_cfs_rq(struct task_struct *p)
        }
 
        /* Catch up with the cfs_rq and remove our load when we leave */
+       tg_update = update_cfs_rq_load_avg(now, cfs_rq, false);
        detach_entity_load_avg(cfs_rq, se);
+       if (tg_update)
+               update_tg_load_avg(cfs_rq, false);
 }
 
 static void attach_task_cfs_rq(struct task_struct *p)
 {
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       u64 now = cfs_rq_clock_task(cfs_rq);
+       int tg_update;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
        /*
@@ -8441,7 +8475,10 @@ static void attach_task_cfs_rq(struct task_struct *p)
 #endif
 
        /* Synchronize task with its cfs_rq */
+       tg_update = update_cfs_rq_load_avg(now, cfs_rq, false);
        attach_entity_load_avg(cfs_rq, se);
+       if (tg_update)
+               update_tg_load_avg(cfs_rq, false);
 
        if (!vruntime_normalized(p))
                se->vruntime += cfs_rq->min_vruntime;
@@ -8501,6 +8538,14 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+static void task_set_group_fair(struct task_struct *p)
+{
+       struct sched_entity *se = &p->se;
+
+       set_task_rq(p, task_cpu(p));
+       se->depth = se->parent ? se->parent->depth + 1 : 0;
+}
+
 static void task_move_group_fair(struct task_struct *p)
 {
        detach_task_cfs_rq(p);
@@ -8513,6 +8558,19 @@ static void task_move_group_fair(struct task_struct *p)
        attach_task_cfs_rq(p);
 }
 
+static void task_change_group_fair(struct task_struct *p, int type)
+{
+       switch (type) {
+       case TASK_SET_GROUP:
+               task_set_group_fair(p);
+               break;
+
+       case TASK_MOVE_GROUP:
+               task_move_group_fair(p);
+               break;
+       }
+}
+
 void free_fair_sched_group(struct task_group *tg)
 {
        int i;
@@ -8564,10 +8622,6 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
                init_cfs_rq(cfs_rq);
                init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
                init_entity_runnable_average(se);
-
-               raw_spin_lock_irq(&rq->lock);
-               post_init_entity_util_avg(se);
-               raw_spin_unlock_irq(&rq->lock);
        }
 
        return 1;
@@ -8578,6 +8632,23 @@ err:
        return 0;
 }
 
+void online_fair_sched_group(struct task_group *tg)
+{
+       struct sched_entity *se;
+       struct rq *rq;
+       int i;
+
+       for_each_possible_cpu(i) {
+               rq = cpu_rq(i);
+               se = tg->se[i];
+
+               raw_spin_lock_irq(&rq->lock);
+               post_init_entity_util_avg(se);
+               sync_throttle(tg, i);
+               raw_spin_unlock_irq(&rq->lock);
+       }
+}
+
 void unregister_fair_sched_group(struct task_group *tg)
 {
        unsigned long flags;
@@ -8682,6 +8753,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
        return 1;
 }
 
+void online_fair_sched_group(struct task_group *tg) { }
+
 void unregister_fair_sched_group(struct task_group *tg) { }
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -8741,7 +8814,7 @@ const struct sched_class fair_sched_class = {
        .update_curr            = update_curr_fair,
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-       .task_move_group        = task_move_group_fair,
+       .task_change_group      = task_change_group_fair,
 #endif
 };
 
index c5aeedf4e93ad8f8f5c2edaf7248ac0ce9c47e8e..9fb873cfc75cfb332c59437bd936e1dd9493d92e 100644 (file)
@@ -201,6 +201,8 @@ exit_idle:
  */
 static void cpu_idle_loop(void)
 {
+       int cpu = smp_processor_id();
+
        while (1) {
                /*
                 * If the arch has a polling bit, we maintain an invariant:
@@ -219,7 +221,7 @@ static void cpu_idle_loop(void)
                        check_pgt_cache();
                        rmb();
 
-                       if (cpu_is_offline(smp_processor_id())) {
+                       if (cpu_is_offline(cpu)) {
                                cpuhp_report_idle_dead();
                                arch_cpu_idle_dead();
                        }
index b0b93fd33af9e4bb4d61edcda77d3b761cb9b8de..a2d6eb71f06b80527b86dd99a83f6d4621265cec 100644 (file)
@@ -78,11 +78,11 @@ void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
        loads[2] = (avenrun[2] + offset) << shift;
 }
 
-long calc_load_fold_active(struct rq *this_rq)
+long calc_load_fold_active(struct rq *this_rq, long adjust)
 {
        long nr_active, delta = 0;
 
-       nr_active = this_rq->nr_running;
+       nr_active = this_rq->nr_running - adjust;
        nr_active += (long)this_rq->nr_uninterruptible;
 
        if (nr_active != this_rq->calc_load_active) {
@@ -188,7 +188,7 @@ void calc_load_enter_idle(void)
         * We're going into NOHZ mode, if there's any pending delta, fold it
         * into the pending idle delta.
         */
-       delta = calc_load_fold_active(this_rq);
+       delta = calc_load_fold_active(this_rq, 0);
        if (delta) {
                int idx = calc_load_write_idx();
 
@@ -389,7 +389,7 @@ void calc_global_load_tick(struct rq *this_rq)
        if (time_before(jiffies, this_rq->calc_load_update))
                return;
 
-       delta  = calc_load_fold_active(this_rq);
+       delta  = calc_load_fold_active(this_rq, 0);
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);
 
index 7cbeb92a1cb9361102b8514f612ee81be87fef10..c64fc5114004f6a893a1bf942a49f3df1d6c8fea 100644 (file)
@@ -28,7 +28,7 @@ extern unsigned long calc_load_update;
 extern atomic_long_t calc_load_tasks;
 
 extern void calc_global_load_tick(struct rq *this_rq);
-extern long calc_load_fold_active(struct rq *this_rq);
+extern long calc_load_fold_active(struct rq *this_rq, long adjust);
 
 #ifdef CONFIG_SMP
 extern void cpu_load_update_active(struct rq *this_rq);
@@ -321,6 +321,7 @@ extern int tg_nop(struct task_group *tg, void *data);
 
 extern void free_fair_sched_group(struct task_group *tg);
 extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
+extern void online_fair_sched_group(struct task_group *tg);
 extern void unregister_fair_sched_group(struct task_group *tg);
 extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                        struct sched_entity *se, int cpu,
@@ -437,7 +438,7 @@ struct cfs_rq {
 
        u64 throttled_clock, throttled_clock_task;
        u64 throttled_clock_task_time;
-       int throttled, throttle_count, throttle_uptodate;
+       int throttled, throttle_count;
        struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -1113,7 +1114,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
         * In particular, the load of prev->state in finish_task_switch() must
         * happen before this.
         *
-        * Pairs with the smp_cond_acquire() in try_to_wake_up().
+        * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
         */
        smp_store_release(&prev->on_cpu, 0);
 #endif
@@ -1246,8 +1247,11 @@ struct sched_class {
 
        void (*update_curr) (struct rq *rq);
 
+#define TASK_SET_GROUP  0
+#define TASK_MOVE_GROUP        1
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
-       void (*task_move_group) (struct task_struct *p);
+       void (*task_change_group) (struct task_struct *p, int type);
 #endif
 };
 
@@ -1809,16 +1813,3 @@ static inline void cpufreq_trigger_update(u64 time) {}
 #else /* arch_scale_freq_capacity */
 #define arch_scale_freq_invariant()    (false)
 #endif
-
-static inline void account_reset_rq(struct rq *rq)
-{
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-       rq->prev_irq_time = 0;
-#endif
-#ifdef CONFIG_PARAVIRT
-       rq->prev_steal_time = 0;
-#endif
-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-       rq->prev_steal_time_rq = 0;
-#endif
-}
index 96e9bc40667fd708837e4cfcccc0f0d2a72ee74e..af21afc00d088c51c3fb1b0ab4306fd31043106d 100644 (file)
@@ -2751,23 +2751,18 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
  *  @ts: upper bound on process time suspension
  */
 int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
-                       const struct timespec *ts)
+                   const struct timespec *ts)
 {
+       ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
        struct task_struct *tsk = current;
-       long timeout = MAX_SCHEDULE_TIMEOUT;
        sigset_t mask = *which;
-       int sig;
+       int sig, ret = 0;
 
        if (ts) {
                if (!timespec_valid(ts))
                        return -EINVAL;
-               timeout = timespec_to_jiffies(ts);
-               /*
-                * We can be close to the next tick, add another one
-                * to ensure we will wait at least the time asked for.
-                */
-               if (ts->tv_sec || ts->tv_nsec)
-                       timeout++;
+               timeout = timespec_to_ktime(*ts);
+               to = &timeout;
        }
 
        /*
@@ -2778,7 +2773,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 
        spin_lock_irq(&tsk->sighand->siglock);
        sig = dequeue_signal(tsk, &mask, info);
-       if (!sig && timeout) {
+       if (!sig && timeout.tv64) {
                /*
                 * None ready, temporarily unblock those we're interested
                 * while we are sleeping in so that we'll be awakened when
@@ -2790,8 +2785,9 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
                recalc_sigpending();
                spin_unlock_irq(&tsk->sighand->siglock);
 
-               timeout = freezable_schedule_timeout_interruptible(timeout);
-
+               __set_current_state(TASK_INTERRUPTIBLE);
+               ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
+                                                        HRTIMER_MODE_REL);
                spin_lock_irq(&tsk->sighand->siglock);
                __set_task_blocked(tsk, &tsk->real_blocked);
                sigemptyset(&tsk->real_blocked);
@@ -2801,7 +2797,7 @@ int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
 
        if (sig)
                return sig;
-       return timeout ? -EINTR : -EAGAIN;
+       return ret ? -EINTR : -EAGAIN;
 }
 
 /**
index 74165443c240147cd701489298d2b552c8f46adf..36552beed39713526aa384a9cf9f1878630834fb 100644 (file)
@@ -107,7 +107,7 @@ void __init call_function_init(void)
  */
 static __always_inline void csd_lock_wait(struct call_single_data *csd)
 {
-       smp_cond_acquire(!(csd->flags & CSD_FLAG_LOCK));
+       smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
 }
 
 static __always_inline void csd_lock(struct call_single_data *csd)
index 87b2fc38398b1550921b694b12767b652d596e84..35f0dcb1cb4f6db187679edc586b5df543e85046 100644 (file)
@@ -1204,6 +1204,17 @@ static struct ctl_table kern_table[] = {
                .extra1         = &one,
                .extra2         = &one,
        },
+#endif
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+       {
+               .procname       = "panic_on_rcu_stall",
+               .data           = &sysctl_panic_on_rcu_stall,
+               .maxlen         = sizeof(sysctl_panic_on_rcu_stall),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
 #endif
        { }
 };
index 53fa971d000d0b7fff1b63be23007057468bf120..6ab4842b00e8bb71db357e5933fc6c3b72669832 100644 (file)
@@ -108,7 +108,6 @@ void task_work_run(void)
                 * fail, but it can play with *work and other entries.
                 */
                raw_spin_unlock_wait(&task->pi_lock);
-               smp_mb();
 
                do {
                        next = work->next;
index e840ed867a5d9406e26a8bc13f3e635b0d66cbdd..c3aad685bbc036cc2f03672085aedd9dc9c8fb86 100644 (file)
@@ -30,7 +30,6 @@
  * struct alarm_base - Alarm timer bases
  * @lock:              Lock for syncrhonized access to the base
  * @timerqueue:                Timerqueue head managing the list of events
- * @timer:             hrtimer used to schedule events while running
  * @gettime:           Function to read the time correlating to the base
  * @base_clockid:      clockid for the base
  */
index a9b76a40319e86b5d545621d274c262bc69c7c70..2c5bc77c0bb004fa5b9bd5e9ca60f60b40ba1b4f 100644 (file)
@@ -645,7 +645,7 @@ void tick_cleanup_dead_cpu(int cpu)
 #endif
 
 #ifdef CONFIG_SYSFS
-struct bus_type clockevents_subsys = {
+static struct bus_type clockevents_subsys = {
        .name           = "clockevents",
        .dev_name       = "clockevent",
 };
index 56ece145a814a87ce172d96f32c17bf9d119b2f8..6a5a310a1a5351205ef9bd633a39efa427897556 100644 (file)
@@ -669,10 +669,12 @@ static void clocksource_enqueue(struct clocksource *cs)
        struct list_head *entry = &clocksource_list;
        struct clocksource *tmp;
 
-       list_for_each_entry(tmp, &clocksource_list, list)
+       list_for_each_entry(tmp, &clocksource_list, list) {
                /* Keep track of the place, where to insert */
-               if (tmp->rating >= cs->rating)
-                       entry = &tmp->list;
+               if (tmp->rating < cs->rating)
+                       break;
+               entry = &tmp->list;
+       }
        list_add(&cs->list, entry);
 }
 
index e99df0ff1d424528dd3e54663472dd5fc4a6c796..d13c9aebf7a3b7e955b897f30f749dc0907f15dc 100644 (file)
@@ -177,7 +177,7 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 #endif
 }
 
-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#ifdef CONFIG_NO_HZ_COMMON
 static inline
 struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
                                         int pinned)
index 1cafba860b08ceb6030fa2a1f237e3950a4e0aa7..39008d78927acb4f9a62f582cb7baba3f27620ee 100644 (file)
@@ -777,6 +777,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                        timer->it.cpu.expires = 0;
                        sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
                                           &itp->it_value);
+                       return;
                } else {
                        cpu_timer_sample_group(timer->it_clock, p, &now);
                        unlock_task_sighand(p, &flags);
index e622ba365a131a445bbfce4c4df860ad50afae48..b0928ab3270fb6d7666c6309eeb5f9dca014b91e 100644 (file)
@@ -43,13 +43,13 @@ static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters)
        int allowed_error_ns = usecs * 5;
 
        for (i = 0; i < iters; ++i) {
-               struct timespec ts1, ts2;
+               s64 kt1, kt2;
                int time_passed;
 
-               ktime_get_ts(&ts1);
+               kt1 = ktime_get_ns();
                udelay(usecs);
-               ktime_get_ts(&ts2);
-               time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1);
+               kt2 = ktime_get_ns();
+               time_passed = kt2 - kt1;
 
                if (i == 0 || time_passed < min)
                        min = time_passed;
@@ -87,11 +87,11 @@ static int udelay_test_show(struct seq_file *s, void *v)
        if (usecs > 0 && iters > 0) {
                return udelay_test_single(s, usecs, iters);
        } else if (usecs == 0) {
-               struct timespec ts;
+               struct timespec64 ts;
 
-               ktime_get_ts(&ts);
-               seq_printf(s, "udelay() test (lpj=%ld kt=%ld.%09ld)\n",
-                               loops_per_jiffy, ts.tv_sec, ts.tv_nsec);
+               ktime_get_ts64(&ts);
+               seq_printf(s, "udelay() test (lpj=%ld kt=%lld.%09ld)\n",
+                               loops_per_jiffy, (s64)ts.tv_sec, ts.tv_nsec);
                seq_puts(s, "usage:\n");
                seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n");
                seq_puts(s, "cat " DEBUGFS_FILENAME "\n");
index 53d7184da0bedb5d7df959d09c2b787f77f0682b..690b797f522e38945ec021cfabe799568448cd7a 100644 (file)
@@ -75,6 +75,7 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 }
 
 static struct clock_event_device ce_broadcast_hrtimer = {
+       .name                   = "bc_hrtimer",
        .set_state_shutdown     = bc_shutdown,
        .set_next_ktime         = bc_set_next,
        .features               = CLOCK_EVT_FEAT_ONESHOT |
index 966a5a6fdd0a03c378debc87baae72672853a6fd..f738251000fe6b07de4f3d54ee78d8f78408c5d7 100644 (file)
@@ -164,3 +164,4 @@ static inline void timers_update_migration(bool update_nohz) { }
 DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
 
 extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
+void timer_clear_idle(void);
index 536ada80f6dde14ec91532ac719a1b2b4d941c58..204fdc86863d71368ef61faacdb0d5458e78039f 100644 (file)
@@ -31,7 +31,7 @@
 #include <trace/events/timer.h>
 
 /*
- * Per cpu nohz control structure
+ * Per-CPU nohz control structure
  */
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
@@ -61,7 +61,7 @@ static void tick_do_update_jiffies64(ktime_t now)
        if (delta.tv64 < tick_period.tv64)
                return;
 
-       /* Reevalute with jiffies_lock held */
+       /* Reevaluate with jiffies_lock held */
        write_seqlock(&jiffies_lock);
 
        delta = ktime_sub(now, last_jiffies_update);
@@ -116,8 +116,8 @@ static void tick_sched_do_timer(ktime_t now)
 #ifdef CONFIG_NO_HZ_COMMON
        /*
         * Check if the do_timer duty was dropped. We don't care about
-        * concurrency: This happens only when the cpu in charge went
-        * into a long sleep. If two cpus happen to assign themself to
+        * concurrency: This happens only when the CPU in charge went
+        * into a long sleep. If two CPUs happen to assign themselves to
         * this duty, then the jiffies update is still serialized by
         * jiffies_lock.
         */
@@ -349,7 +349,7 @@ void tick_nohz_dep_clear_signal(struct signal_struct *sig, enum tick_dep_bits bi
 /*
  * Re-evaluate the need for the tick as we switch the current task.
  * It might need the tick due to per task/process properties:
- * perf events, posix cpu timers, ...
+ * perf events, posix CPU timers, ...
  */
 void __tick_nohz_task_switch(void)
 {
@@ -509,8 +509,8 @@ int tick_nohz_tick_stopped(void)
  *
  * In case the sched_tick was stopped on this CPU, we have to check if jiffies
  * must be updated. Otherwise an interrupt handler could use a stale jiffy
- * value. We do this unconditionally on any cpu, as we don't know whether the
- * cpu, which has the update task assigned is in a long sleep.
+ * value. We do this unconditionally on any CPU, as we don't know whether the
+ * CPU, which has the update task assigned is in a long sleep.
  */
 static void tick_nohz_update_jiffies(ktime_t now)
 {
@@ -526,7 +526,7 @@ static void tick_nohz_update_jiffies(ktime_t now)
 }
 
 /*
- * Updates the per cpu time idle statistics counters
+ * Updates the per-CPU time idle statistics counters
  */
 static void
 update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
@@ -566,12 +566,12 @@ static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
 }
 
 /**
- * get_cpu_idle_time_us - get the total idle time of a cpu
+ * get_cpu_idle_time_us - get the total idle time of a CPU
  * @cpu: CPU number to query
  * @last_update_time: variable to store update time in. Do not update
  * counters if NULL.
  *
- * Return the cummulative idle time (since boot) for a given
+ * Return the cumulative idle time (since boot) for a given
  * CPU, in microseconds.
  *
  * This time is measured via accounting rather than sampling,
@@ -607,12 +607,12 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
 /**
- * get_cpu_iowait_time_us - get the total iowait time of a cpu
+ * get_cpu_iowait_time_us - get the total iowait time of a CPU
  * @cpu: CPU number to query
  * @last_update_time: variable to store update time in. Do not update
  * counters if NULL.
  *
- * Return the cummulative iowait time (since boot) for a given
+ * Return the cumulative iowait time (since boot) for a given
  * CPU, in microseconds.
  *
  * This time is measured via accounting rather than sampling,
@@ -700,6 +700,12 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
        delta = next_tick - basemono;
        if (delta <= (u64)TICK_NSEC) {
                tick.tv64 = 0;
+
+               /*
+                * Tell the timer code that the base is not idle, i.e. undo
+                * the effect of get_next_timer_interrupt():
+                */
+               timer_clear_idle();
                /*
                 * We've not stopped the tick yet, and there's a timer in the
                 * next period, so no point in stopping it either, bail.
@@ -726,14 +732,14 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
        }
 
        /*
-        * If this cpu is the one which updates jiffies, then give up
-        * the assignment and let it be taken by the cpu which runs
-        * the tick timer next, which might be this cpu as well. If we
+        * If this CPU is the one which updates jiffies, then give up
+        * the assignment and let it be taken by the CPU which runs
+        * the tick timer next, which might be this CPU as well. If we
         * don't drop this here the jiffies might be stale and
         * do_timer() never invoked. Keep track of the fact that it
-        * was the one which had the do_timer() duty last. If this cpu
+        * was the one which had the do_timer() duty last. If this CPU
         * is the one which had the do_timer() duty last, we limit the
-        * sleep time to the timekeeping max_deferement value.
+        * sleep time to the timekeeping max_deferment value.
         * Otherwise we can sleep as long as we want.
         */
        delta = timekeeping_max_deferment();
@@ -809,6 +815,12 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
        tick_do_update_jiffies64(now);
        cpu_load_update_nohz_stop();
 
+       /*
+        * Clear the timer idle flag, so we avoid IPIs on remote queueing and
+        * the clock forward checks in the enqueue path:
+        */
+       timer_clear_idle();
+
        calc_load_exit_idle();
        touch_softlockup_watchdog_sched();
        /*
@@ -841,9 +853,9 @@ static void tick_nohz_full_update_tick(struct tick_sched *ts)
 static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
 {
        /*
-        * If this cpu is offline and it is the one which updates
+        * If this CPU is offline and it is the one which updates
         * jiffies, then give up the assignment and let it be taken by
-        * the cpu which runs the tick timer next. If we don't drop
+        * the CPU which runs the tick timer next. If we don't drop
         * this here the jiffies might be stale and do_timer() never
         * invoked.
         */
@@ -896,11 +908,10 @@ static void __tick_nohz_idle_enter(struct tick_sched *ts)
        ktime_t now, expires;
        int cpu = smp_processor_id();
 
-       now = tick_nohz_start_idle(ts);
-
        if (can_stop_idle_tick(cpu, ts)) {
                int was_stopped = ts->tick_stopped;
 
+               now = tick_nohz_start_idle(ts);
                ts->idle_calls++;
 
                expires = tick_nohz_stop_sched_tick(ts, now, cpu);
@@ -933,11 +944,11 @@ void tick_nohz_idle_enter(void)
        WARN_ON_ONCE(irqs_disabled());
 
        /*
-        * Update the idle state in the scheduler domain hierarchy
-        * when tick_nohz_stop_sched_tick() is called from the idle loop.
-        * State will be updated to busy during the first busy tick after
-        * exiting idle.
-        */
+        * Update the idle state in the scheduler domain hierarchy
+        * when tick_nohz_stop_sched_tick() is called from the idle loop.
+        * State will be updated to busy during the first busy tick after
+        * exiting idle.
+        */
        set_cpu_sd_state_idle();
 
        local_irq_disable();
@@ -1092,35 +1103,6 @@ static void tick_nohz_switch_to_nohz(void)
        tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
 
-/*
- * When NOHZ is enabled and the tick is stopped, we need to kick the
- * tick timer from irq_enter() so that the jiffies update is kept
- * alive during long running softirqs. That's ugly as hell, but
- * correctness is key even if we need to fix the offending softirq in
- * the first place.
- *
- * Note, this is different to tick_nohz_restart. We just kick the
- * timer and do not touch the other magic bits which need to be done
- * when idle is left.
- */
-static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
-{
-#if 0
-       /* Switch back to 2.6.27 behaviour */
-       ktime_t delta;
-
-       /*
-        * Do not touch the tick device, when the next expiry is either
-        * already reached or less/equal than the tick period.
-        */
-       delta = ktime_sub(hrtimer_get_expires(&ts->sched_timer), now);
-       if (delta.tv64 <= tick_period.tv64)
-               return;
-
-       tick_nohz_restart(ts, now);
-#endif
-}
-
 static inline void tick_nohz_irq_enter(void)
 {
        struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
@@ -1131,10 +1113,8 @@ static inline void tick_nohz_irq_enter(void)
        now = ktime_get();
        if (ts->idle_active)
                tick_nohz_stop_idle(ts, now);
-       if (ts->tick_stopped) {
+       if (ts->tick_stopped)
                tick_nohz_update_jiffies(now);
-               tick_nohz_kick_tick(ts, now);
-       }
 }
 
 #else
@@ -1211,7 +1191,7 @@ void tick_setup_sched_timer(void)
        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        ts->sched_timer.function = tick_sched_timer;
 
-       /* Get the next period (per cpu) */
+       /* Get the next period (per-CPU) */
        hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
        /* Offset the tick to avert jiffies_lock contention. */
index 86628e755f38f82bab7423a2d9f702782bc6c598..7142580ad94fa03f3011b6c444b7bc3679ca6088 100644 (file)
@@ -67,20 +67,21 @@ static const unsigned short __mon_yday[2][13] = {
 #define SECS_PER_DAY   (SECS_PER_HOUR * 24)
 
 /**
- * time_to_tm - converts the calendar time to local broken-down time
+ * time64_to_tm - converts the calendar time to local broken-down time
  *
  * @totalsecs  the number of seconds elapsed since 00:00:00 on January 1, 1970,
  *             Coordinated Universal Time (UTC).
  * @offset     offset seconds adding to totalsecs.
  * @result     pointer to struct tm variable to receive broken-down time
  */
-void time_to_tm(time_t totalsecs, int offset, struct tm *result)
+void time64_to_tm(time64_t totalsecs, int offset, struct tm *result)
 {
        long days, rem, y;
+       int remainder;
        const unsigned short *ip;
 
-       days = totalsecs / SECS_PER_DAY;
-       rem = totalsecs % SECS_PER_DAY;
+       days = div_s64_rem(totalsecs, SECS_PER_DAY, &remainder);
+       rem = remainder;
        rem += offset;
        while (rem < 0) {
                rem += SECS_PER_DAY;
@@ -124,4 +125,4 @@ void time_to_tm(time_t totalsecs, int offset, struct tm *result)
        result->tm_mon = y;
        result->tm_mday = days + 1;
 }
-EXPORT_SYMBOL(time_to_tm);
+EXPORT_SYMBOL(time64_to_tm);
index 479d25cd3d4ffc53f75c5d29912df312f609bbb1..3b65746c7f156dbdea1e7b1f6fb4e43197820369 100644 (file)
@@ -480,10 +480,12 @@ static inline void old_vsyscall_fixup(struct timekeeper *tk)
        * users are removed, this can be killed.
        */
        remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
-       tk->tkr_mono.xtime_nsec -= remainder;
-       tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
-       tk->ntp_error += remainder << tk->ntp_error_shift;
-       tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
+       if (remainder != 0) {
+               tk->tkr_mono.xtime_nsec -= remainder;
+               tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
+               tk->ntp_error += remainder << tk->ntp_error_shift;
+               tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
+       }
 }
 #else
 #define old_vsyscall_fixup(tk)
@@ -2186,6 +2188,7 @@ struct timespec64 get_monotonic_coarse64(void)
 
        return now;
 }
+EXPORT_SYMBOL(get_monotonic_coarse64);
 
 /*
  * Must hold jiffies_lock
index 3a95f9728778c587b316274fd510b79ed5574494..cb9ab401e2d9cad40d1f1dc41e33d52642a5d6e2 100644 (file)
@@ -59,43 +59,153 @@ __visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
 EXPORT_SYMBOL(jiffies_64);
 
 /*
- * per-CPU timer vector definitions:
+ * The timer wheel has LVL_DEPTH array levels. Each level provides an array of
+ * LVL_SIZE buckets. Each level is driven by its own clock and therefore each
+ * level has a different granularity.
+ *
+ * The level granularity is:           LVL_CLK_DIV ^ lvl
+ * The level clock frequency is:       HZ / (LVL_CLK_DIV ^ level)
+ *
+ * The array level of a newly armed timer depends on the relative expiry
+ * time. The farther the expiry time is away the higher the array level and
+ * therefore the coarser the granularity becomes.
+ *
+ * Contrary to the original timer wheel implementation, which aims for 'exact'
+ * expiry of the timers, this implementation removes the need for recascading
+ * the timers into the lower array levels. The previous 'classic' timer wheel
+ * implementation of the kernel already violated the 'exact' expiry by adding
+ * slack to the expiry time to provide batched expiration. The granularity
+ * levels provide implicit batching.
+ *
+ * This is an optimization of the original timer wheel implementation for the
+ * majority of the timer wheel use cases: timeouts. The vast majority of
+ * timeout timers (networking, disk I/O ...) are canceled before expiry. If
+ * the timeout expires it indicates that normal operation is disturbed, so it
+ * does not matter much whether the timeout comes with a slight delay.
+ *
+ * The only exception to this are networking timers with a small expiry
+ * time. They rely on the granularity. Those fit into the first wheel level,
+ * which has HZ granularity.
+ *
+ * We don't have cascading anymore. Timers with an expiry time above the
+ * capacity of the last wheel level are force expired at the maximum timeout
+ * value of the last wheel level. From data sampling we know that the maximum
+ * value observed is 5 days (network connection tracking), so this should not
+ * be an issue.
+ *
+ * The currently chosen array constants values are a good compromise between
+ * array size and granularity.
+ *
+ * This results in the following granularity and range levels:
+ *
+ * HZ 1000 steps
+ * Level Offset  Granularity            Range
+ *  0      0         1 ms                0 ms -         63 ms
+ *  1     64         8 ms               64 ms -        511 ms
+ *  2    128        64 ms              512 ms -       4095 ms (512ms - ~4s)
+ *  3    192       512 ms             4096 ms -      32767 ms (~4s - ~32s)
+ *  4    256      4096 ms (~4s)      32768 ms -     262143 ms (~32s - ~4m)
+ *  5    320     32768 ms (~32s)    262144 ms -    2097151 ms (~4m - ~34m)
+ *  6    384    262144 ms (~4m)    2097152 ms -   16777215 ms (~34m - ~4h)
+ *  7    448   2097152 ms (~34m)  16777216 ms -  134217727 ms (~4h - ~1d)
+ *  8    512  16777216 ms (~4h)  134217728 ms - 1073741822 ms (~1d - ~12d)
+ *
+ * HZ  300
+ * Level Offset  Granularity            Range
+ *  0     0         3 ms                0 ms -        210 ms
+ *  1    64        26 ms              213 ms -       1703 ms (213ms - ~1s)
+ *  2   128       213 ms             1706 ms -      13650 ms (~1s - ~13s)
+ *  3   192      1706 ms (~1s)      13653 ms -     109223 ms (~13s - ~1m)
+ *  4   256     13653 ms (~13s)    109226 ms -     873810 ms (~1m - ~14m)
+ *  5   320    109226 ms (~1m)     873813 ms -    6990503 ms (~14m - ~1h)
+ *  6   384    873813 ms (~14m)   6990506 ms -   55924050 ms (~1h - ~15h)
+ *  7   448   6990506 ms (~1h)   55924053 ms -  447392423 ms (~15h - ~5d)
+ *  8    512  55924053 ms (~15h) 447392426 ms - 3579139406 ms (~5d - ~41d)
+ *
+ * HZ  250
+ * Level Offset  Granularity            Range
+ *  0     0         4 ms                0 ms -        255 ms
+ *  1    64        32 ms              256 ms -       2047 ms (256ms - ~2s)
+ *  2   128       256 ms             2048 ms -      16383 ms (~2s - ~16s)
+ *  3   192      2048 ms (~2s)      16384 ms -     131071 ms (~16s - ~2m)
+ *  4   256     16384 ms (~16s)    131072 ms -    1048575 ms (~2m - ~17m)
+ *  5   320    131072 ms (~2m)    1048576 ms -    8388607 ms (~17m - ~2h)
+ *  6   384   1048576 ms (~17m)   8388608 ms -   67108863 ms (~2h - ~18h)
+ *  7   448   8388608 ms (~2h)   67108864 ms -  536870911 ms (~18h - ~6d)
+ *  8    512  67108864 ms (~18h) 536870912 ms - 4294967288 ms (~6d - ~49d)
+ *
+ * HZ  100
+ * Level Offset  Granularity            Range
+ *  0     0         10 ms               0 ms -        630 ms
+ *  1    64         80 ms             640 ms -       5110 ms (640ms - ~5s)
+ *  2   128        640 ms            5120 ms -      40950 ms (~5s - ~40s)
+ *  3   192       5120 ms (~5s)     40960 ms -     327670 ms (~40s - ~5m)
+ *  4   256      40960 ms (~40s)   327680 ms -    2621430 ms (~5m - ~43m)
+ *  5   320     327680 ms (~5m)   2621440 ms -   20971510 ms (~43m - ~5h)
+ *  6   384    2621440 ms (~43m) 20971520 ms -  167772150 ms (~5h - ~1d)
+ *  7   448   20971520 ms (~5h) 167772160 ms - 1342177270 ms (~1d - ~15d)
  */
-#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
-#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
-#define TVN_SIZE (1 << TVN_BITS)
-#define TVR_SIZE (1 << TVR_BITS)
-#define TVN_MASK (TVN_SIZE - 1)
-#define TVR_MASK (TVR_SIZE - 1)
-#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
-
-struct tvec {
-       struct hlist_head vec[TVN_SIZE];
-};
 
-struct tvec_root {
-       struct hlist_head vec[TVR_SIZE];
-};
+/* Clock divisor for the next level */
+#define LVL_CLK_SHIFT  3
+#define LVL_CLK_DIV    (1UL << LVL_CLK_SHIFT)
+#define LVL_CLK_MASK   (LVL_CLK_DIV - 1)
+#define LVL_SHIFT(n)   ((n) * LVL_CLK_SHIFT)
+#define LVL_GRAN(n)    (1UL << LVL_SHIFT(n))
 
-struct tvec_base {
-       spinlock_t lock;
-       struct timer_list *running_timer;
-       unsigned long timer_jiffies;
-       unsigned long next_timer;
-       unsigned long active_timers;
-       unsigned long all_timers;
-       int cpu;
-       bool migration_enabled;
-       bool nohz_active;
-       struct tvec_root tv1;
-       struct tvec tv2;
-       struct tvec tv3;
-       struct tvec tv4;
-       struct tvec tv5;
-} ____cacheline_aligned;
+/*
+ * The time start value for each level to select the bucket at enqueue
+ * time.
+ */
+#define LVL_START(n)   ((LVL_SIZE - 1) << (((n) - 1) * LVL_CLK_SHIFT))
+
+/* Size of each clock level */
+#define LVL_BITS       6
+#define LVL_SIZE       (1UL << LVL_BITS)
+#define LVL_MASK       (LVL_SIZE - 1)
+#define LVL_OFFS(n)    ((n) * LVL_SIZE)
+
+/* Level depth */
+#if HZ > 100
+# define LVL_DEPTH     9
+# else
+# define LVL_DEPTH     8
+#endif
+
+/* The cutoff (max. capacity of the wheel) */
+#define WHEEL_TIMEOUT_CUTOFF   (LVL_START(LVL_DEPTH))
+#define WHEEL_TIMEOUT_MAX      (WHEEL_TIMEOUT_CUTOFF - LVL_GRAN(LVL_DEPTH - 1))
+
+/*
+ * The resulting wheel size. If NOHZ is configured we allocate two
+ * wheels so we have a separate storage for the deferrable timers.
+ */
+#define WHEEL_SIZE     (LVL_SIZE * LVL_DEPTH)
+
+#ifdef CONFIG_NO_HZ_COMMON
+# define NR_BASES      2
+# define BASE_STD      0
+# define BASE_DEF      1
+#else
+# define NR_BASES      1
+# define BASE_STD      0
+# define BASE_DEF      0
+#endif
 
+struct timer_base {
+       spinlock_t              lock;
+       struct timer_list       *running_timer;
+       unsigned long           clk;
+       unsigned long           next_expiry;
+       unsigned int            cpu;
+       bool                    migration_enabled;
+       bool                    nohz_active;
+       bool                    is_idle;
+       DECLARE_BITMAP(pending_map, WHEEL_SIZE);
+       struct hlist_head       vectors[WHEEL_SIZE];
+} ____cacheline_aligned;
 
-static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
+static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 unsigned int sysctl_timer_migration = 1;
@@ -106,15 +216,17 @@ void timers_update_migration(bool update_nohz)
        unsigned int cpu;
 
        /* Avoid the loop, if nothing to update */
-       if (this_cpu_read(tvec_bases.migration_enabled) == on)
+       if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
                return;
 
        for_each_possible_cpu(cpu) {
-               per_cpu(tvec_bases.migration_enabled, cpu) = on;
+               per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
+               per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
                per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
                if (!update_nohz)
                        continue;
-               per_cpu(tvec_bases.nohz_active, cpu) = true;
+               per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
+               per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
                per_cpu(hrtimer_bases.nohz_active, cpu) = true;
        }
 }
@@ -133,20 +245,6 @@ int timer_migration_handler(struct ctl_table *table, int write,
        mutex_unlock(&mutex);
        return ret;
 }
-
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
-                                               int pinned)
-{
-       if (pinned || !base->migration_enabled)
-               return this_cpu_ptr(&tvec_bases);
-       return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
-}
-#else
-static inline struct tvec_base *get_target_base(struct tvec_base *base,
-                                               int pinned)
-{
-       return this_cpu_ptr(&tvec_bases);
-}
 #endif
 
 static unsigned long round_jiffies_common(unsigned long j, int cpu,
@@ -351,101 +449,126 @@ unsigned long round_jiffies_up_relative(unsigned long j)
 }
 EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
 
-/**
- * set_timer_slack - set the allowed slack for a timer
- * @timer: the timer to be modified
- * @slack_hz: the amount of time (in jiffies) allowed for rounding
- *
- * Set the amount of time, in jiffies, that a certain timer has
- * in terms of slack. By setting this value, the timer subsystem
- * will schedule the actual timer somewhere between
- * the time mod_timer() asks for, and that time plus the slack.
- *
- * By setting the slack to -1, a percentage of the delay is used
- * instead.
- */
-void set_timer_slack(struct timer_list *timer, int slack_hz)
+
+static inline unsigned int timer_get_idx(struct timer_list *timer)
 {
-       timer->slack = slack_hz;
+       return (timer->flags & TIMER_ARRAYMASK) >> TIMER_ARRAYSHIFT;
 }
-EXPORT_SYMBOL_GPL(set_timer_slack);
 
-static void
-__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static inline void timer_set_idx(struct timer_list *timer, unsigned int idx)
 {
-       unsigned long expires = timer->expires;
-       unsigned long idx = expires - base->timer_jiffies;
-       struct hlist_head *vec;
+       timer->flags = (timer->flags & ~TIMER_ARRAYMASK) |
+                       idx << TIMER_ARRAYSHIFT;
+}
 
-       if (idx < TVR_SIZE) {
-               int i = expires & TVR_MASK;
-               vec = base->tv1.vec + i;
-       } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
-               int i = (expires >> TVR_BITS) & TVN_MASK;
-               vec = base->tv2.vec + i;
-       } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
-               int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
-               vec = base->tv3.vec + i;
-       } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
-               int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
-               vec = base->tv4.vec + i;
-       } else if ((signed long) idx < 0) {
-               /*
-                * Can happen if you add a timer with expires == jiffies,
-                * or you set a timer to go off in the past
-                */
-               vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
+/*
+ * Helper function to calculate the array index for a given expiry
+ * time.
+ */
+static inline unsigned calc_index(unsigned expires, unsigned lvl)
+{
+       expires = (expires + LVL_GRAN(lvl)) >> LVL_SHIFT(lvl);
+       return LVL_OFFS(lvl) + (expires & LVL_MASK);
+}
+
+static int calc_wheel_index(unsigned long expires, unsigned long clk)
+{
+       unsigned long delta = expires - clk;
+       unsigned int idx;
+
+       if (delta < LVL_START(1)) {
+               idx = calc_index(expires, 0);
+       } else if (delta < LVL_START(2)) {
+               idx = calc_index(expires, 1);
+       } else if (delta < LVL_START(3)) {
+               idx = calc_index(expires, 2);
+       } else if (delta < LVL_START(4)) {
+               idx = calc_index(expires, 3);
+       } else if (delta < LVL_START(5)) {
+               idx = calc_index(expires, 4);
+       } else if (delta < LVL_START(6)) {
+               idx = calc_index(expires, 5);
+       } else if (delta < LVL_START(7)) {
+               idx = calc_index(expires, 6);
+       } else if (LVL_DEPTH > 8 && delta < LVL_START(8)) {
+               idx = calc_index(expires, 7);
+       } else if ((long) delta < 0) {
+               idx = clk & LVL_MASK;
        } else {
-               int i;
-               /* If the timeout is larger than MAX_TVAL (on 64-bit
-                * architectures or with CONFIG_BASE_SMALL=1) then we
-                * use the maximum timeout.
+               /*
+                * Force expire obscene large timeouts to expire at the
+                * capacity limit of the wheel.
                 */
-               if (idx > MAX_TVAL) {
-                       idx = MAX_TVAL;
-                       expires = idx + base->timer_jiffies;
-               }
-               i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
-               vec = base->tv5.vec + i;
+               if (expires >= WHEEL_TIMEOUT_CUTOFF)
+                       expires = WHEEL_TIMEOUT_MAX;
+
+               idx = calc_index(expires, LVL_DEPTH - 1);
        }
+       return idx;
+}
 
-       hlist_add_head(&timer->entry, vec);
+/*
+ * Enqueue the timer into the hash bucket, mark it pending in
+ * the bitmap and store the index in the timer flags.
+ */
+static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
+                         unsigned int idx)
+{
+       hlist_add_head(&timer->entry, base->vectors + idx);
+       __set_bit(idx, base->pending_map);
+       timer_set_idx(timer, idx);
 }
 
-static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
+static void
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
 {
-       /* Advance base->jiffies, if the base is empty */
-       if (!base->all_timers++)
-               base->timer_jiffies = jiffies;
+       unsigned int idx;
+
+       idx = calc_wheel_index(timer->expires, base->clk);
+       enqueue_timer(base, timer, idx);
+}
+
+static void
+trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
+{
+       if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+               return;
 
-       __internal_add_timer(base, timer);
        /*
-        * Update base->active_timers and base->next_timer
+        * TODO: This wants some optimizing similar to the code below, but we
+        * will do that when we switch from push to pull for deferrable timers.
         */
-       if (!(timer->flags & TIMER_DEFERRABLE)) {
-               if (!base->active_timers++ ||
-                   time_before(timer->expires, base->next_timer))
-                       base->next_timer = timer->expires;
+       if (timer->flags & TIMER_DEFERRABLE) {
+               if (tick_nohz_full_cpu(base->cpu))
+                       wake_up_nohz_cpu(base->cpu);
+               return;
        }
 
        /*
-        * Check whether the other CPU is in dynticks mode and needs
-        * to be triggered to reevaluate the timer wheel.
-        * We are protected against the other CPU fiddling
-        * with the timer by holding the timer base lock. This also
-        * makes sure that a CPU on the way to stop its tick can not
-        * evaluate the timer wheel.
-        *
-        * Spare the IPI for deferrable timers on idle targets though.
-        * The next busy ticks will take care of it. Except full dynticks
-        * require special care against races with idle_cpu(), lets deal
-        * with that later.
+        * We might have to IPI the remote CPU if the base is idle and the
+        * timer is not deferrable. If the other CPU is on the way to idle
+        * then it can't set base->is_idle as we hold the base lock:
         */
-       if (base->nohz_active) {
-               if (!(timer->flags & TIMER_DEFERRABLE) ||
-                   tick_nohz_full_cpu(base->cpu))
-                       wake_up_nohz_cpu(base->cpu);
-       }
+       if (!base->is_idle)
+               return;
+
+       /* Check whether this is the new first expiring timer: */
+       if (time_after_eq(timer->expires, base->next_expiry))
+               return;
+
+       /*
+        * Set the next expiry time and kick the CPU so it can reevaluate the
+        * wheel:
+        */
+       base->next_expiry = timer->expires;
+               wake_up_nohz_cpu(base->cpu);
+}
+
+static void
+internal_add_timer(struct timer_base *base, struct timer_list *timer)
+{
+       __internal_add_timer(base, timer);
+       trigger_dyntick_cpu(base, timer);
 }
 
 #ifdef CONFIG_TIMER_STATS
@@ -666,7 +789,6 @@ static void do_init_timer(struct timer_list *timer, unsigned int flags,
 {
        timer->entry.pprev = NULL;
        timer->flags = flags | raw_smp_processor_id();
-       timer->slack = -1;
 #ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
@@ -706,54 +828,125 @@ static inline void detach_timer(struct timer_list *timer, bool clear_pending)
        entry->next = LIST_POISON2;
 }
 
-static inline void
-detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
-{
-       detach_timer(timer, true);
-       if (!(timer->flags & TIMER_DEFERRABLE))
-               base->active_timers--;
-       base->all_timers--;
-}
-
-static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
+static int detach_if_pending(struct timer_list *timer, struct timer_base *base,
                             bool clear_pending)
 {
+       unsigned idx = timer_get_idx(timer);
+
        if (!timer_pending(timer))
                return 0;
 
+       /* Last timer in its bucket? Then clear the bucket's pending bit. */
+       if (hlist_is_singular_node(&timer->entry, base->vectors + idx))
+               __clear_bit(idx, base->pending_map);
+
        detach_timer(timer, clear_pending);
-       if (!(timer->flags & TIMER_DEFERRABLE)) {
-               base->active_timers--;
-               if (timer->expires == base->next_timer)
-                       base->next_timer = base->timer_jiffies;
-       }
-       /* If this was the last timer, advance base->jiffies */
-       if (!--base->all_timers)
-               base->timer_jiffies = jiffies;
        return 1;
 }
 
+/* Return the timer base of @cpu which matches the timer flags @tflags. */
+static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
+{
+       struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
+
+       /*
+        * If the timer is deferrable and nohz is active then we need to use
+        * the deferrable base.
+        */
+       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
+           (tflags & TIMER_DEFERRABLE))
+               base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
+       return base;
+}
+
+/* Same as get_timer_cpu_base(), but for the current CPU. */
+static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
+{
+       struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+       /*
+        * If the timer is deferrable and nohz is active then we need to use
+        * the deferrable base.
+        */
+       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
+           (tflags & TIMER_DEFERRABLE))
+               base = this_cpu_ptr(&timer_bases[BASE_DEF]);
+       return base;
+}
+
+/* The CPU a timer is queued on is encoded in the low bits of its flags. */
+static inline struct timer_base *get_timer_base(u32 tflags)
+{
+       return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
+}
+
+#ifdef CONFIG_NO_HZ_COMMON
+/*
+ * Select the base to queue a new/modified timer on: pinned timers and
+ * configurations with timer migration disabled stay on the current CPU,
+ * otherwise get_nohz_timer_target() picks the target CPU.
+ */
+static inline struct timer_base *
+__get_target_base(struct timer_base *base, unsigned tflags)
+{
+#ifdef CONFIG_SMP
+       if ((tflags & TIMER_PINNED) || !base->migration_enabled)
+               return get_timer_this_cpu_base(tflags);
+       return get_timer_cpu_base(tflags, get_nohz_timer_target());
+#else
+       return get_timer_this_cpu_base(tflags);
+#endif
+}
+
+static inline void forward_timer_base(struct timer_base *base)
+{
+       /*
+        * We only forward the base when it's idle and we have a delta between
+        * base clock and jiffies of at least two jiffies.
+        */
+       if (!base->is_idle || (long) (jiffies - base->clk) < 2)
+               return;
+
+       /*
+        * If the next expiry value is > jiffies, then we fast forward to
+        * jiffies otherwise we forward to the next expiry value.
+        */
+       if (time_after(base->next_expiry, jiffies))
+               base->clk = jiffies;
+       else
+               base->clk = base->next_expiry;
+}
+#else
+/* !CONFIG_NO_HZ_COMMON: no timer migration, always use the local base. */
+static inline struct timer_base *
+__get_target_base(struct timer_base *base, unsigned tflags)
+{
+       return get_timer_this_cpu_base(tflags);
+}
+
+/* With a periodic tick there is no idle base clock to forward. */
+static inline void forward_timer_base(struct timer_base *base) { }
+#endif
+
+/* Pick the base to enqueue on and catch its clock up first if possible. */
+static inline struct timer_base *
+get_target_base(struct timer_base *base, unsigned tflags)
+{
+       struct timer_base *target = __get_target_base(base, tflags);
+
+       forward_timer_base(target);
+       return target;
+}
+
 /*
- * We are using hashed locking: holding per_cpu(tvec_bases).lock
- * means that all timers which are tied to this base via timer->base are
- * locked, and the base itself is locked too.
+ * We are using hashed locking: Holding per_cpu(timer_bases[x]).lock means
+ * that all timers which are tied to this base are locked, and the base itself
+ * is locked too.
  *
  * So __run_timers/migrate_timers can safely modify all timers which could
- * be found on ->tvX lists.
+ * be found in the base->vectors array.
  *
- * When the timer's base is locked and removed from the list, the
- * TIMER_MIGRATING flag is set, FIXME
+ * When a timer is migrating then the TIMER_MIGRATING flag is set and we need
+ * to wait until the migration is done.
  */
-static struct tvec_base *lock_timer_base(struct timer_list *timer,
-                                       unsigned long *flags)
+static struct timer_base *lock_timer_base(struct timer_list *timer,
+                                         unsigned long *flags)
        __acquires(timer->base->lock)
 {
        for (;;) {
+               struct timer_base *base;
                u32 tf = timer->flags;
-               struct tvec_base *base;
 
                if (!(tf & TIMER_MIGRATING)) {
-                       base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
+                       base = get_timer_base(tf);
                        spin_lock_irqsave(&base->lock, *flags);
                        if (timer->flags == tf)
                                return base;
@@ -764,13 +957,41 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 }
 
 static inline int
-__mod_timer(struct timer_list *timer, unsigned long expires,
-           bool pending_only, int pinned)
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
-       struct tvec_base *base, *new_base;
-       unsigned long flags;
+       struct timer_base *base, *new_base;
+       unsigned int idx = UINT_MAX;
+       unsigned long clk = 0, flags;
        int ret = 0;
 
+       /*
+        * This is a common optimization triggered by the networking code - if
+        * the timer is re-modified to have the same timeout or ends up in the
+        * same array bucket then just return:
+        */
+       if (timer_pending(timer)) {
+               if (timer->expires == expires)
+                       return 1;
+               /*
+                * Take the current timer_jiffies of base, but without holding
+                * the lock!
+                */
+               base = get_timer_base(timer->flags);
+               clk = base->clk;
+
+               idx = calc_wheel_index(expires, clk);
+
+               /*
+                * Retrieve and compare the array index of the pending
+                * timer. If it matches set the expiry to the new value so a
+                * subsequent call will exit in the expires check above.
+                */
+               if (idx == timer_get_idx(timer)) {
+                       timer->expires = expires;
+                       return 1;
+               }
+       }
+
        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);
 
@@ -782,15 +1003,15 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
        debug_activate(timer, expires);
 
-       new_base = get_target_base(base, pinned);
+       new_base = get_target_base(base, timer->flags);
 
        if (base != new_base) {
                /*
-                * We are trying to schedule the timer on the local CPU.
+                * We are trying to schedule the timer on the new base.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
-                * handler yet has not finished. This also guarantees that
-                * the timer is serialized wrt itself.
+                * handler has not yet finished. This also guarantees that the
+                * timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
@@ -805,7 +1026,18 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
        }
 
        timer->expires = expires;
-       internal_add_timer(base, timer);
+       /*
+        * If 'idx' was calculated above and the base time did not advance
+        * between calculating 'idx' and taking the lock, only enqueue_timer()
+        * and trigger_dyntick_cpu() are required. Otherwise we need to
+        * (re)calculate the wheel index via internal_add_timer().
+        */
+       if (idx != UINT_MAX && clk == base->clk) {
+               enqueue_timer(base, timer, idx);
+               trigger_dyntick_cpu(base, timer);
+       } else {
+               internal_add_timer(base, timer);
+       }
 
 out_unlock:
        spin_unlock_irqrestore(&base->lock, flags);
@@ -825,49 +1057,10 @@ out_unlock:
  */
 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-       return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
+       /* Pinning is now expressed via TIMER_PINNED in timer->flags */
+       return __mod_timer(timer, expires, true);
 }
 EXPORT_SYMBOL(mod_timer_pending);
 
-/*
- * Decide where to put the timer while taking the slack into account
- *
- * Algorithm:
- *   1) calculate the maximum (absolute) time
- *   2) calculate the highest bit where the expires and new max are different
- *   3) use this bit to make a mask
- *   4) use the bitmask to round down the maximum time, so that all last
- *      bits are zeros
- */
-static inline
-unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
-{
-       unsigned long expires_limit, mask;
-       int bit;
-
-       if (timer->slack >= 0) {
-               expires_limit = expires + timer->slack;
-       } else {
-               long delta = expires - jiffies;
-
-               if (delta < 256)
-                       return expires;
-
-               expires_limit = expires + delta / 256;
-       }
-       mask = expires ^ expires_limit;
-       if (mask == 0)
-               return expires;
-
-       bit = __fls(mask);
-
-       mask = (1UL << bit) - 1;
-
-       expires_limit = expires_limit & ~(mask);
-
-       return expires_limit;
-}
-
 /**
  * mod_timer - modify a timer's timeout
  * @timer: the timer to be modified
@@ -890,48 +1083,10 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-       expires = apply_slack(timer, expires);
-
-       /*
-        * This is a common optimization triggered by the
-        * networking code - if the timer is re-modified
-        * to be the same thing then just return:
-        */
-       if (timer_pending(timer) && timer->expires == expires)
-               return 1;
-
-       return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
+       /* The "same expiry/same bucket" optimization moved into __mod_timer() */
+       return __mod_timer(timer, expires, false);
 }
 EXPORT_SYMBOL(mod_timer);
 
-/**
- * mod_timer_pinned - modify a timer's timeout
- * @timer: the timer to be modified
- * @expires: new timeout in jiffies
- *
- * mod_timer_pinned() is a way to update the expire field of an
- * active timer (if the timer is inactive it will be activated)
- * and to ensure that the timer is scheduled on the current CPU.
- *
- * Note that this does not prevent the timer from being migrated
- * when the current CPU goes offline.  If this is a problem for
- * you, use CPU-hotplug notifiers to handle it correctly, for
- * example, cancelling the timer when the corresponding CPU goes
- * offline.
- *
- * mod_timer_pinned(timer, expires) is equivalent to:
- *
- *     del_timer(timer); timer->expires = expires; add_timer(timer);
- */
-int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
-{
-       if (timer->expires == expires && timer_pending(timer))
-               return 1;
-
-       return __mod_timer(timer, expires, false, TIMER_PINNED);
-}
-EXPORT_SYMBOL(mod_timer_pinned);
-
 /**
  * add_timer - start a timer
  * @timer: the timer to be added
@@ -962,13 +1117,14 @@ EXPORT_SYMBOL(add_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-       struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu);
-       struct tvec_base *base;
+       struct timer_base *new_base, *base;
        unsigned long flags;
 
        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
 
+       new_base = get_timer_cpu_base(timer->flags, cpu);
+
        /*
         * If @timer was on a different CPU, it should be migrated with the
         * old base locked to prevent other operations proceeding with the
@@ -1004,7 +1160,7 @@ EXPORT_SYMBOL_GPL(add_timer_on);
  */
 int del_timer(struct timer_list *timer)
 {
-       struct tvec_base *base;
+       struct timer_base *base;
        unsigned long flags;
        int ret = 0;
 
@@ -1030,7 +1186,7 @@ EXPORT_SYMBOL(del_timer);
  */
 int try_to_del_timer_sync(struct timer_list *timer)
 {
-       struct tvec_base *base;
+       struct timer_base *base;
        unsigned long flags;
        int ret = -1;
 
@@ -1114,27 +1270,6 @@ int del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
-static int cascade(struct tvec_base *base, struct tvec *tv, int index)
-{
-       /* cascade all the timers from tv up one level */
-       struct timer_list *timer;
-       struct hlist_node *tmp;
-       struct hlist_head tv_list;
-
-       hlist_move_list(tv->vec + index, &tv_list);
-
-       /*
-        * We are removing _all_ timers from the list, so we
-        * don't have to detach them individually.
-        */
-       hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
-               /* No accounting, while moving them */
-               __internal_add_timer(base, timer);
-       }
-
-       return index;
-}
-
 static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
                          unsigned long data)
 {
@@ -1178,147 +1313,141 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
        }
 }
 
-#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
-
-/**
- * __run_timers - run all expired timers (if any) on this CPU.
- * @base: the timer vector to be processed.
- *
- * This function cascades all vectors and executes all expired timer
- * vectors.
- */
-static inline void __run_timers(struct tvec_base *base)
+/*
+ * Expire all timers on @head. Called with base->lock held and interrupts
+ * disabled; the lock is dropped (and for !TIMER_IRQSAFE timers interrupts
+ * reenabled) around each callback invocation.
+ */
+static void expire_timers(struct timer_base *base, struct hlist_head *head)
 {
-       struct timer_list *timer;
+       while (!hlist_empty(head)) {
+               struct timer_list *timer;
+               void (*fn)(unsigned long);
+               unsigned long data;
 
-       spin_lock_irq(&base->lock);
+               timer = hlist_entry(head->first, struct timer_list, entry);
+               timer_stats_account_timer(timer);
 
-       while (time_after_eq(jiffies, base->timer_jiffies)) {
-               struct hlist_head work_list;
-               struct hlist_head *head = &work_list;
-               int index;
+               base->running_timer = timer;
+               detach_timer(timer, true);
 
-               if (!base->all_timers) {
-                       base->timer_jiffies = jiffies;
-                       break;
+               fn = timer->function;
+               data = timer->data;
+
+               if (timer->flags & TIMER_IRQSAFE) {
+                       spin_unlock(&base->lock);
+                       call_timer_fn(timer, fn, data);
+                       spin_lock(&base->lock);
+               } else {
+                       spin_unlock_irq(&base->lock);
+                       call_timer_fn(timer, fn, data);
+                       spin_lock_irq(&base->lock);
                }
+       }
+}
 
-               index = base->timer_jiffies & TVR_MASK;
+/*
+ * Move all buckets which expire at base->clk into @heads, one list head
+ * per wheel level. Returns the number of levels collected. Caller must
+ * hold base->lock.
+ */
+static int __collect_expired_timers(struct timer_base *base,
+                                   struct hlist_head *heads)
+{
+       unsigned long clk = base->clk;
+       struct hlist_head *vec;
+       int i, levels = 0;
+       unsigned int idx;
 
-               /*
-                * Cascade timers:
-                */
-               if (!index &&
-                       (!cascade(base, &base->tv2, INDEX(0))) &&
-                               (!cascade(base, &base->tv3, INDEX(1))) &&
-                                       !cascade(base, &base->tv4, INDEX(2)))
-                       cascade(base, &base->tv5, INDEX(3));
-               ++base->timer_jiffies;
-               hlist_move_list(base->tv1.vec + index, head);
-               while (!hlist_empty(head)) {
-                       void (*fn)(unsigned long);
-                       unsigned long data;
-                       bool irqsafe;
-
-                       timer = hlist_entry(head->first, struct timer_list, entry);
-                       fn = timer->function;
-                       data = timer->data;
-                       irqsafe = timer->flags & TIMER_IRQSAFE;
-
-                       timer_stats_account_timer(timer);
-
-                       base->running_timer = timer;
-                       detach_expired_timer(timer, base);
-
-                       if (irqsafe) {
-                               spin_unlock(&base->lock);
-                               call_timer_fn(timer, fn, data);
-                               spin_lock(&base->lock);
-                       } else {
-                               spin_unlock_irq(&base->lock);
-                               call_timer_fn(timer, fn, data);
-                               spin_lock_irq(&base->lock);
-                       }
+       for (i = 0; i < LVL_DEPTH; i++) {
+               idx = (clk & LVL_MASK) + i * LVL_SIZE;
+
+               if (__test_and_clear_bit(idx, base->pending_map)) {
+                       vec = base->vectors + idx;
+                       hlist_move_list(vec, heads++);
+                       levels++;
                }
+               /* Is it time to look at the next level? */
+               if (clk & LVL_CLK_MASK)
+                       break;
+               /* Shift clock for the next level granularity */
+               clk >>= LVL_CLK_SHIFT;
        }
-       base->running_timer = NULL;
-       spin_unlock_irq(&base->lock);
+       return levels;
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
 /*
- * Find out when the next timer event is due to happen. This
- * is used on S/390 to stop all activity when a CPU is idle.
- * This function needs to be called with interrupts disabled.
+ * Find the next pending bucket of a level. Search from level start (@offset)
+ * + @clk upwards and if nothing there, search from start of the level
+ * (@offset) up to @offset + clk.
  */
-static unsigned long __next_timer_interrupt(struct tvec_base *base)
-{
-       unsigned long timer_jiffies = base->timer_jiffies;
-       unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
-       int index, slot, array, found = 0;
-       struct timer_list *nte;
-       struct tvec *varray[4];
-
-       /* Look for timer events in tv1. */
-       index = slot = timer_jiffies & TVR_MASK;
-       do {
-               hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
-                       if (nte->flags & TIMER_DEFERRABLE)
-                               continue;
-
-                       found = 1;
-                       expires = nte->expires;
-                       /* Look at the cascade bucket(s)? */
-                       if (!index || slot < index)
-                               goto cascade;
-                       return expires;
+static int next_pending_bucket(struct timer_base *base, unsigned offset,
+                              unsigned clk)
+{
+       unsigned pos, start = offset + clk;
+       unsigned end = offset + LVL_SIZE;
+
+       pos = find_next_bit(base->pending_map, end, start);
+       if (pos < end)
+               return pos - start;
+
+       /* Nothing in [start, end). Wrap: search [offset, start). */
+       pos = find_next_bit(base->pending_map, start, offset);
+       /* -1 when no bucket of this level is pending at all */
+       return pos < start ? pos + LVL_SIZE - start : -1;
+}
+
+/*
+ * Search the first expiring timer in the various clock levels. Caller must
+ * hold base->lock.
+ */
+static unsigned long __next_timer_interrupt(struct timer_base *base)
+{
+       unsigned long clk, next, adj;
+       unsigned lvl, offset = 0;
+
+       next = base->clk + NEXT_TIMER_MAX_DELTA;
+       clk = base->clk;
+       for (lvl = 0; lvl < LVL_DEPTH; lvl++, offset += LVL_SIZE) {
+               int pos = next_pending_bucket(base, offset, clk & LVL_MASK);
+
+               if (pos >= 0) {
+                       unsigned long tmp = clk + (unsigned long) pos;
+
+                       tmp <<= LVL_SHIFT(lvl);
+                       if (time_before(tmp, next))
+                               next = tmp;
                }
-               slot = (slot + 1) & TVR_MASK;
-       } while (slot != index);
-
-cascade:
-       /* Calculate the next cascade event */
-       if (index)
-               timer_jiffies += TVR_SIZE - index;
-       timer_jiffies >>= TVR_BITS;
-
-       /* Check tv2-tv5. */
-       varray[0] = &base->tv2;
-       varray[1] = &base->tv3;
-       varray[2] = &base->tv4;
-       varray[3] = &base->tv5;
-
-       for (array = 0; array < 4; array++) {
-               struct tvec *varp = varray[array];
-
-               index = slot = timer_jiffies & TVN_MASK;
-               do {
-                       hlist_for_each_entry(nte, varp->vec + slot, entry) {
-                               if (nte->flags & TIMER_DEFERRABLE)
-                                       continue;
-
-                               found = 1;
-                               if (time_before(nte->expires, expires))
-                                       expires = nte->expires;
-                       }
-                       /*
-                        * Do we still search for the first timer or are
-                        * we looking up the cascade buckets ?
-                        */
-                       if (found) {
-                               /* Look at the cascade bucket(s)? */
-                               if (!index || slot < index)
-                                       break;
-                               return expires;
-                       }
-                       slot = (slot + 1) & TVN_MASK;
-               } while (slot != index);
-
-               if (index)
-                       timer_jiffies += TVN_SIZE - index;
-               timer_jiffies >>= TVN_BITS;
+               /*
+                * Clock for the next level. If the current level clock lower
+                * bits are zero, we look at the next level as is. If not we
+                * need to advance it by one because that's going to be the
+                * next expiring bucket in that level. base->clk is the next
+                * expiring jiffy. So in case of:
+                *
+                * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+                *  0    0    0    0    0    0
+                *
+                * we have to look at all levels @index 0. With
+                *
+                * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+                *  0    0    0    0    0    2
+                *
+                * LVL0 has the next expiring bucket @index 2. The upper
+                * levels have the next expiring bucket @index 1.
+                *
+                * In case that the propagation wraps the next level the same
+                * rules apply:
+                *
+                * LVL5 LVL4 LVL3 LVL2 LVL1 LVL0
+                *  0    0    0    0    F    2
+                *
+                * So after looking at LVL0 we get:
+                *
+                * LVL5 LVL4 LVL3 LVL2 LVL1
+                *  0    0    0    1    0
+                *
+                * So no propagation from LVL1 to LVL2 because that happened
+                * with the add already, but then we need to propagate further
+                * from LVL2 to LVL3.
+                *
+                * So the simple check whether the lower bits of the current
+                * level are 0 or not is sufficient for all cases.
+                */
+               adj = clk & LVL_CLK_MASK ? 1 : 0;
+               clk >>= LVL_CLK_SHIFT;
+               clk += adj;
        }
-       return expires;
+       return next;
 }
 
 /*
@@ -1364,7 +1493,7 @@ static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
  */
 u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
-       struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+       struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
        u64 expires = KTIME_MAX;
        unsigned long nextevt;
 
@@ -1376,19 +1505,80 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
                return expires;
 
        spin_lock(&base->lock);
-       if (base->active_timers) {
-               if (time_before_eq(base->next_timer, base->timer_jiffies))
-                       base->next_timer = __next_timer_interrupt(base);
-               nextevt = base->next_timer;
-               if (time_before_eq(nextevt, basej))
-                       expires = basem;
-               else
-                       expires = basem + (nextevt - basej) * TICK_NSEC;
+       nextevt = __next_timer_interrupt(base);
+       base->next_expiry = nextevt;
+       /*
+        * We have a fresh next event. Check whether we can forward the base:
+        */
+       if (time_after(nextevt, jiffies))
+               base->clk = jiffies;
+       else if (time_after(nextevt, base->clk))
+               base->clk = nextevt;
+
+       if (time_before_eq(nextevt, basej)) {
+               expires = basem;
+               base->is_idle = false;
+       } else {
+               expires = basem + (nextevt - basej) * TICK_NSEC;
+               /*
+                * If we expect to sleep more than a tick, mark the base idle:
+                */
+               if ((expires - basem) > TICK_NSEC)
+                       base->is_idle = true;
        }
        spin_unlock(&base->lock);
 
        return cmp_next_hrtimer_event(basem, expires);
 }
+
+/**
+ * timer_clear_idle - Clear the idle state of the timer base
+ *
+ * Called with interrupts disabled
+ *
+ * Only the standard (BASE_STD) base is touched; it is the only base which
+ * gets marked idle by get_next_timer_interrupt().
+ */
+void timer_clear_idle(void)
+{
+       struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
+       /*
+        * We do this unlocked. The worst outcome is a remote enqueue sending
+        * a pointless IPI, but taking the lock would just make the window for
+        * sending the IPI a few instructions smaller for the cost of taking
+        * the lock in the exit from idle path.
+        */
+       base->is_idle = false;
+}
+
+static int collect_expired_timers(struct timer_base *base,
+                                 struct hlist_head *heads)
+{
+       /*
+        * NOHZ optimization. After a long idle sleep we need to forward the
+        * base to current jiffies. Avoid a loop by searching the bitfield for
+        * the next expiring timer.
+        */
+       if ((long)(jiffies - base->clk) > 2) {
+               unsigned long next = __next_timer_interrupt(base);
+
+               /*
+                * If the next timer is ahead of time forward to current
+                * jiffies, otherwise forward to the next expiry time:
+                */
+               if (time_after(next, jiffies)) {
+                       /* The call site will increment clock! */
+                       base->clk = jiffies - 1;
+                       return 0;
+               }
+               base->clk = next;
+       }
+       return __collect_expired_timers(base, heads);
+}
+#else
+/* !NOHZ: the tick keeps base->clk close to jiffies, no fast forward needed */
+static inline int collect_expired_timers(struct timer_base *base,
+                                        struct hlist_head *heads)
+{
+       return __collect_expired_timers(base, heads);
+}
 #endif
 
 /*
@@ -1411,15 +1601,42 @@ void update_process_times(int user_tick)
        run_posix_cpu_timers(p);
 }
 
+/**
+ * __run_timers - run all expired timers (if any) on this CPU.
+ * @base: the timer vector to be processed.
+ */
+static inline void __run_timers(struct timer_base *base)
+{
+       struct hlist_head heads[LVL_DEPTH];
+       int levels;
+
+       if (!time_after_eq(jiffies, base->clk))
+               return;
+
+       spin_lock_irq(&base->lock);
+
+       while (time_after_eq(jiffies, base->clk)) {
+
+               levels = collect_expired_timers(base, heads);
+               base->clk++;
+
+               while (levels--)
+                       expire_timers(base, heads + levels);
+       }
+       /* No timer callback is running on this base anymore */
+       base->running_timer = NULL;
+       spin_unlock_irq(&base->lock);
+}
+
 /*
  * This function runs timers and the timer-tq in bottom half context.
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-       struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+       struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
 
-       if (time_after_eq(jiffies, base->timer_jiffies))
-               __run_timers(base);
+       __run_timers(base);
+       /* The deferrable base is only populated when NOHZ is active */
+       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
+               __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
 }
 
 /*
@@ -1427,7 +1644,18 @@ static void run_timer_softirq(struct softirq_action *h)
  */
 void run_local_timers(void)
 {
+       struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
+
        hrtimer_run_queues();
+       /* Raise the softirq only if required. */
+       if (time_before(jiffies, base->clk)) {
+               if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
+                       return;
+               /* CPU is awake, so check the deferrable base. */
+               base++; /* &timer_bases[BASE_DEF] follows BASE_STD */
+               if (time_before(jiffies, base->clk))
+                       return;
+       }
        raise_softirq(TIMER_SOFTIRQ);
 }
 
@@ -1512,7 +1740,7 @@ signed long __sched schedule_timeout(signed long timeout)
        expire = timeout + jiffies;
 
        setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-       __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
+       __mod_timer(&timer, expire, false);
        schedule();
        del_singleshot_timer_sync(&timer);
 
@@ -1563,14 +1791,13 @@ signed long __sched schedule_timeout_idle(signed long timeout)
 EXPORT_SYMBOL(schedule_timeout_idle);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
+static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *head)
 {
        struct timer_list *timer;
        int cpu = new_base->cpu;
 
        while (!hlist_empty(head)) {
                timer = hlist_entry(head->first, struct timer_list, entry);
-               /* We ignore the accounting on the dying cpu */
                detach_timer(timer, false);
                timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
                internal_add_timer(new_base, timer);
@@ -1579,37 +1806,31 @@ static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *he
 
+/* Move all timers of the offlined @cpu to this CPU, one base at a time. */
 static void migrate_timers(int cpu)
 {
-       struct tvec_base *old_base;
-       struct tvec_base *new_base;
-       int i;
+       struct timer_base *old_base;
+       struct timer_base *new_base;
+       int b, i;
 
        BUG_ON(cpu_online(cpu));
-       old_base = per_cpu_ptr(&tvec_bases, cpu);
-       new_base = get_cpu_ptr(&tvec_bases);
-       /*
-        * The caller is globally serialized and nobody else
-        * takes two locks at once, deadlock is not possible.
-        */
-       spin_lock_irq(&new_base->lock);
-       spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
-       BUG_ON(old_base->running_timer);
-
-       for (i = 0; i < TVR_SIZE; i++)
-               migrate_timer_list(new_base, old_base->tv1.vec + i);
-       for (i = 0; i < TVN_SIZE; i++) {
-               migrate_timer_list(new_base, old_base->tv2.vec + i);
-               migrate_timer_list(new_base, old_base->tv3.vec + i);
-               migrate_timer_list(new_base, old_base->tv4.vec + i);
-               migrate_timer_list(new_base, old_base->tv5.vec + i);
-       }
 
-       old_base->active_timers = 0;
-       old_base->all_timers = 0;
+       for (b = 0; b < NR_BASES; b++) {
+               old_base = per_cpu_ptr(&timer_bases[b], cpu);
+               new_base = get_cpu_ptr(&timer_bases[b]);
+               /*
+                * The caller is globally serialized and nobody else
+                * takes two locks at once, deadlock is not possible.
+                */
+               spin_lock_irq(&new_base->lock);
+               spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+
+               BUG_ON(old_base->running_timer);
 
-       spin_unlock(&old_base->lock);
-       spin_unlock_irq(&new_base->lock);
-       put_cpu_ptr(&tvec_bases);
+               for (i = 0; i < WHEEL_SIZE; i++)
+                       migrate_timer_list(new_base, old_base->vectors + i);
+
+               spin_unlock(&old_base->lock);
+               spin_unlock_irq(&new_base->lock);
+               put_cpu_ptr(&timer_bases);
+       }
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
@@ -1637,13 +1858,15 @@ static inline void timer_register_cpu_notifier(void) { }
 
 static void __init init_timer_cpu(int cpu)
 {
-       struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
-
-       base->cpu = cpu;
-       spin_lock_init(&base->lock);
+       struct timer_base *base;
+       int i;
 
-       base->timer_jiffies = jiffies;
-       base->next_timer = base->timer_jiffies;
+       for (i = 0; i < NR_BASES; i++) {
+               base = per_cpu_ptr(&timer_bases[i], cpu);
+               base->cpu = cpu;
+               spin_lock_init(&base->lock);
+               base->clk = jiffies;
+       }
 }
 
 static void __init init_timer_cpus(void)
@@ -1702,9 +1925,15 @@ static void __sched do_usleep_range(unsigned long min, unsigned long max)
 }
 
 /**
- * usleep_range - Drop in replacement for udelay where wakeup is flexible
+ * usleep_range - Sleep for an approximate time
  * @min: Minimum time in usecs to sleep
  * @max: Maximum time in usecs to sleep
+ *
+ * In non-atomic context where the exact wakeup time is flexible, use
+ * usleep_range() instead of udelay().  The sleep improves responsiveness
+ * by avoiding the CPU-hogging busy-wait of udelay(), and the range reduces
+ * power usage by allowing hrtimers to take advantage of an already-
+ * scheduled interrupt instead of scheduling a new one just for this sleep.
  */
 void __sched usleep_range(unsigned long min, unsigned long max)
 {
index 1adecb4b87c8492e558b845914bd5c9a004fe9c0..087204c733ebb39c881c2b58b6937ad4ffe3d756 100644 (file)
@@ -279,7 +279,7 @@ static void print_name_offset(struct seq_file *m, unsigned long addr)
 
 static int tstats_show(struct seq_file *m, void *v)
 {
-       struct timespec period;
+       struct timespec64 period;
        struct entry *entry;
        unsigned long ms;
        long events = 0;
@@ -295,11 +295,11 @@ static int tstats_show(struct seq_file *m, void *v)
 
        time = ktime_sub(time_stop, time_start);
 
-       period = ktime_to_timespec(time);
+       period = ktime_to_timespec64(time);
        ms = period.tv_nsec / 1000000;
 
        seq_puts(m, "Timer Stats Version: v0.3\n");
-       seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
+       seq_printf(m, "Sample period: %ld.%03ld s\n", (long)period.tv_sec, ms);
        if (atomic_read(&overflow_count))
                seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
        seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
index fa0bdeee17ac3c01d1cc04283b9629e1586d1c67..75961b3decfe9489cfa08459a8efba8931a0343a 100644 (file)
@@ -81,6 +81,104 @@ static unsigned long sum_online;
 static int min_online = -1;
 static int max_online;
 
+/*
+ * Attempt to take a CPU offline.  Return false if the CPU is already
+ * offline or if it is not subject to CPU-hotplug operations.  The
+ * caller can detect other failures by looking at the statistics.
+ */
+bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
+                    unsigned long *sum_offl, int *min_offl, int *max_offl)
+{
+       unsigned long delta;
+       int ret;
+       unsigned long starttime;
+
+       if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
+               return false;
+
+       if (verbose)
+               pr_alert("%s" TORTURE_FLAG
+                        "torture_onoff task: offlining %d\n",
+                        torture_type, cpu);
+       starttime = jiffies;
+       (*n_offl_attempts)++;
+       ret = cpu_down(cpu);
+       if (ret) {
+               if (verbose)
+                       pr_alert("%s" TORTURE_FLAG
+                                "torture_onoff task: offline %d failed: errno %d\n",
+                                torture_type, cpu, ret);
+       } else {
+               if (verbose)
+                       pr_alert("%s" TORTURE_FLAG
+                                "torture_onoff task: offlined %d\n",
+                                torture_type, cpu);
+               (*n_offl_successes)++;
+               delta = jiffies - starttime;
+               *sum_offl += delta;
+               if (*min_offl < 0) {
+                       *min_offl = delta;
+                       *max_offl = delta;
+               }
+               if (*min_offl > delta)
+                       *min_offl = delta;
+               if (*max_offl < delta)
+                       *max_offl = delta;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(torture_offline);
+
+/*
+ * Attempt to bring a CPU online.  Return false if the CPU is already
+ * online or if it is not subject to CPU-hotplug operations.  The
+ * caller can detect other failures by looking at the statistics.
+ */
+bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
+                   unsigned long *sum_onl, int *min_onl, int *max_onl)
+{
+       unsigned long delta;
+       int ret;
+       unsigned long starttime;
+
+       if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
+               return false;
+
+       if (verbose)
+               pr_alert("%s" TORTURE_FLAG
+                        "torture_onoff task: onlining %d\n",
+                        torture_type, cpu);
+       starttime = jiffies;
+       (*n_onl_attempts)++;
+       ret = cpu_up(cpu);
+       if (ret) {
+               if (verbose)
+                       pr_alert("%s" TORTURE_FLAG
+                                "torture_onoff task: online %d failed: errno %d\n",
+                                torture_type, cpu, ret);
+       } else {
+               if (verbose)
+                       pr_alert("%s" TORTURE_FLAG
+                                "torture_onoff task: onlined %d\n",
+                                torture_type, cpu);
+               (*n_onl_successes)++;
+               delta = jiffies - starttime;
+               *sum_onl += delta;
+               if (*min_onl < 0) {
+                       *min_onl = delta;
+                       *max_onl = delta;
+               }
+               if (*min_onl > delta)
+                       *min_onl = delta;
+               if (*max_onl < delta)
+                       *max_onl = delta;
+       }
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(torture_online);
+
 /*
  * Execute random CPU-hotplug operations at the interval specified
  * by the onoff_interval.
@@ -89,16 +187,19 @@ static int
 torture_onoff(void *arg)
 {
        int cpu;
-       unsigned long delta;
        int maxcpu = -1;
        DEFINE_TORTURE_RANDOM(rand);
-       int ret;
-       unsigned long starttime;
 
        VERBOSE_TOROUT_STRING("torture_onoff task started");
        for_each_online_cpu(cpu)
                maxcpu = cpu;
        WARN_ON(maxcpu < 0);
+
+       if (maxcpu == 0) {
+               VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
+               goto stop;
+       }
+
        if (onoff_holdoff > 0) {
                VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
                schedule_timeout_interruptible(onoff_holdoff);
@@ -106,69 +207,16 @@ torture_onoff(void *arg)
        }
        while (!torture_must_stop()) {
                cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
-               if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
-                       if (verbose)
-                               pr_alert("%s" TORTURE_FLAG
-                                        "torture_onoff task: offlining %d\n",
-                                        torture_type, cpu);
-                       starttime = jiffies;
-                       n_offline_attempts++;
-                       ret = cpu_down(cpu);
-                       if (ret) {
-                               if (verbose)
-                                       pr_alert("%s" TORTURE_FLAG
-                                                "torture_onoff task: offline %d failed: errno %d\n",
-                                                torture_type, cpu, ret);
-                       } else {
-                               if (verbose)
-                                       pr_alert("%s" TORTURE_FLAG
-                                                "torture_onoff task: offlined %d\n",
-                                                torture_type, cpu);
-                               n_offline_successes++;
-                               delta = jiffies - starttime;
-                               sum_offline += delta;
-                               if (min_offline < 0) {
-                                       min_offline = delta;
-                                       max_offline = delta;
-                               }
-                               if (min_offline > delta)
-                                       min_offline = delta;
-                               if (max_offline < delta)
-                                       max_offline = delta;
-                       }
-               } else if (cpu_is_hotpluggable(cpu)) {
-                       if (verbose)
-                               pr_alert("%s" TORTURE_FLAG
-                                        "torture_onoff task: onlining %d\n",
-                                        torture_type, cpu);
-                       starttime = jiffies;
-                       n_online_attempts++;
-                       ret = cpu_up(cpu);
-                       if (ret) {
-                               if (verbose)
-                                       pr_alert("%s" TORTURE_FLAG
-                                                "torture_onoff task: online %d failed: errno %d\n",
-                                                torture_type, cpu, ret);
-                       } else {
-                               if (verbose)
-                                       pr_alert("%s" TORTURE_FLAG
-                                                "torture_onoff task: onlined %d\n",
-                                                torture_type, cpu);
-                               n_online_successes++;
-                               delta = jiffies - starttime;
-                               sum_online += delta;
-                               if (min_online < 0) {
-                                       min_online = delta;
-                                       max_online = delta;
-                               }
-                               if (min_online > delta)
-                                       min_online = delta;
-                               if (max_online < delta)
-                                       max_online = delta;
-                       }
-               }
+               if (!torture_offline(cpu,
+                                    &n_offline_attempts, &n_offline_successes,
+                                    &sum_offline, &min_offline, &max_offline))
+                       torture_online(cpu,
+                                      &n_online_attempts, &n_online_successes,
+                                      &sum_online, &min_online, &max_online);
                schedule_timeout_interruptible(onoff_interval);
        }
+
+stop:
        torture_kthread_stopping("torture_onoff");
        return 0;
 }
index e1c0e996b5ae63175feb24d5d6a6c8b541b63287..97e7b793df35be3a1ce01d3bf22ed2dc2d28fd84 100644 (file)
@@ -4600,15 +4600,11 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
        if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
                return;
 
-       /* is @cpu the only online CPU? */
        cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
-       if (cpumask_weight(&cpumask) != 1)
-               return;
 
        /* as we're called from CPU_ONLINE, the following shouldn't fail */
        for_each_pool_worker(worker, pool)
-               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
-                                                 pool->attrs->cpumask) < 0);
+               WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
 }
 
 /*
index b9cfdbfae9aaa90a6a3a85f83887128335fe36b7..805b7048a1bdcbbf31471f67b6f9e07afb11fca9 100644 (file)
@@ -1307,22 +1307,6 @@ config RCU_PERF_TEST
          Say M if you want the RCU performance tests to build as a module.
          Say N if you are unsure.
 
-config RCU_PERF_TEST_RUNNABLE
-       bool "performance tests for RCU runnable by default"
-       depends on RCU_PERF_TEST = y
-       default n
-       help
-         This option provides a way to build the RCU performance tests
-         directly into the kernel without them starting up at boot time.
-         You can use /sys/module to manually override this setting.
-         This /proc file is available only when the RCU performance
-         tests have been built into the kernel.
-
-         Say Y here if you want the RCU performance tests to start during
-         boot (you probably don't).
-         Say N here if you want the RCU performance tests to start only
-         after being manually enabled via /sys/module.
-
 config RCU_TORTURE_TEST
        tristate "torture tests for RCU"
        depends on DEBUG_KERNEL
@@ -1340,23 +1324,6 @@ config RCU_TORTURE_TEST
          Say M if you want the RCU torture tests to build as a module.
          Say N if you are unsure.
 
-config RCU_TORTURE_TEST_RUNNABLE
-       bool "torture tests for RCU runnable by default"
-       depends on RCU_TORTURE_TEST = y
-       default n
-       help
-         This option provides a way to build the RCU torture tests
-         directly into the kernel without them starting up at boot
-         time.  You can use /proc/sys/kernel/rcutorture_runnable
-         to manually override this setting.  This /proc file is
-         available only when the RCU torture tests have been built
-         into the kernel.
-
-         Say Y here if you want the RCU torture tests to start during
-         boot (you probably don't).
-         Say N here if you want the RCU torture tests to start only
-         after being manually enabled via /proc.
-
 config RCU_TORTURE_TEST_SLOW_PREINIT
        bool "Slow down RCU grace-period pre-initialization to expose races"
        depends on RCU_TORTURE_TEST
index ff6a7a6c63951f080a655df67f7ad0524a03201a..07d06a8b9788c5390f32b39ed165a08e502d673d 100644 (file)
@@ -15,9 +15,6 @@ KCOV_INSTRUMENT_rbtree.o := n
 KCOV_INSTRUMENT_list_debug.o := n
 KCOV_INSTRUMENT_debugobjects.o := n
 KCOV_INSTRUMENT_dynamic_debug.o := n
-# Kernel does not boot if we instrument this file as it uses custom calling
-# convention (see CONFIG_ARCH_HWEIGHT_CFLAGS).
-KCOV_INSTRUMENT_hweight.o := n
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
         rbtree.o radix-tree.o dump_stack.o timerqueue.o\
@@ -74,8 +71,6 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 
-GCOV_PROFILE_hweight.o := n
-CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
 obj-$(CONFIG_BTREE) += btree.o
index 2886ebac656772c8f77fe26f01d95175e0c18142..53c2d5edc826d4dd322e1623c3aa04297b762a2b 100644 (file)
@@ -96,17 +96,41 @@ long long atomic64_##op##_return(long long a, atomic64_t *v)                \
 }                                                                      \
 EXPORT_SYMBOL(atomic64_##op##_return);
 
+#define ATOMIC64_FETCH_OP(op, c_op)                                    \
+long long atomic64_fetch_##op(long long a, atomic64_t *v)              \
+{                                                                      \
+       unsigned long flags;                                            \
+       raw_spinlock_t *lock = lock_addr(v);                            \
+       long long val;                                                  \
+                                                                       \
+       raw_spin_lock_irqsave(lock, flags);                             \
+       val = v->counter;                                               \
+       v->counter c_op a;                                              \
+       raw_spin_unlock_irqrestore(lock, flags);                        \
+       return val;                                                     \
+}                                                                      \
+EXPORT_SYMBOL(atomic64_fetch_##op);
+
 #define ATOMIC64_OPS(op, c_op)                                         \
        ATOMIC64_OP(op, c_op)                                           \
-       ATOMIC64_OP_RETURN(op, c_op)
+       ATOMIC64_OP_RETURN(op, c_op)                                    \
+       ATOMIC64_FETCH_OP(op, c_op)
 
 ATOMIC64_OPS(add, +=)
 ATOMIC64_OPS(sub, -=)
-ATOMIC64_OP(and, &=)
-ATOMIC64_OP(or, |=)
-ATOMIC64_OP(xor, ^=)
 
 #undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op)                                         \
+       ATOMIC64_OP(op, c_op)                                           \
+       ATOMIC64_OP_RETURN(op, c_op)                                    \
+       ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &=)
+ATOMIC64_OPS(or, |=)
+ATOMIC64_OPS(xor, ^=)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
index 1234818143204a754ec6df5ae8073107c5171dc5..dbb369145dda4665773f7f964b95a021b6d9a06d 100644 (file)
@@ -53,11 +53,25 @@ do {                                                                \
        BUG_ON(atomic##bit##_read(&v) != r);                    \
 } while (0)
 
+#define TEST_FETCH(bit, op, c_op, val)                         \
+do {                                                           \
+       atomic##bit##_set(&v, v0);                              \
+       r = v0;                                                 \
+       r c_op val;                                             \
+       BUG_ON(atomic##bit##_##op(val, &v) != v0);              \
+       BUG_ON(atomic##bit##_read(&v) != r);                    \
+} while (0)
+
 #define RETURN_FAMILY_TEST(bit, op, c_op, val)                 \
 do {                                                           \
        FAMILY_TEST(TEST_RETURN, bit, op, c_op, val);           \
 } while (0)
 
+#define FETCH_FAMILY_TEST(bit, op, c_op, val)                  \
+do {                                                           \
+       FAMILY_TEST(TEST_FETCH, bit, op, c_op, val);            \
+} while (0)
+
 #define TEST_ARGS(bit, op, init, ret, expect, args...)         \
 do {                                                           \
        atomic##bit##_set(&v, init);                            \
@@ -114,6 +128,16 @@ static __init void test_atomic(void)
        RETURN_FAMILY_TEST(, sub_return, -=, onestwos);
        RETURN_FAMILY_TEST(, sub_return, -=, -one);
 
+       FETCH_FAMILY_TEST(, fetch_add, +=, onestwos);
+       FETCH_FAMILY_TEST(, fetch_add, +=, -one);
+       FETCH_FAMILY_TEST(, fetch_sub, -=, onestwos);
+       FETCH_FAMILY_TEST(, fetch_sub, -=, -one);
+
+       FETCH_FAMILY_TEST(, fetch_or,  |=, v1);
+       FETCH_FAMILY_TEST(, fetch_and, &=, v1);
+       FETCH_FAMILY_TEST(, fetch_andnot, &= ~, v1);
+       FETCH_FAMILY_TEST(, fetch_xor, ^=, v1);
+
        INC_RETURN_FAMILY_TEST(, v0);
        DEC_RETURN_FAMILY_TEST(, v0);
 
@@ -154,6 +178,16 @@ static __init void test_atomic64(void)
        RETURN_FAMILY_TEST(64, sub_return, -=, onestwos);
        RETURN_FAMILY_TEST(64, sub_return, -=, -one);
 
+       FETCH_FAMILY_TEST(64, fetch_add, +=, onestwos);
+       FETCH_FAMILY_TEST(64, fetch_add, +=, -one);
+       FETCH_FAMILY_TEST(64, fetch_sub, -=, onestwos);
+       FETCH_FAMILY_TEST(64, fetch_sub, -=, -one);
+
+       FETCH_FAMILY_TEST(64, fetch_or,  |=, v1);
+       FETCH_FAMILY_TEST(64, fetch_and, &=, v1);
+       FETCH_FAMILY_TEST(64, fetch_andnot, &= ~, v1);
+       FETCH_FAMILY_TEST(64, fetch_xor, ^=, v1);
+
        INIT(v0);
        atomic64_inc(&v);
        r += one;
index c66da508cbf7826a769d25f24786252c958482a4..eca88087fa8adc0f451dc99aa19e4c63067031b7 100644 (file)
@@ -14,9 +14,9 @@
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/uaccess.h>
 
 #include <asm/page.h>
-#include <asm/uaccess.h>
 
 /*
  * bitmaps provide an array of bits, implemented using an an
index 9a5c1f2215585f35a8eea43eedd2566a6f459231..43273a7d83cf41621221354bc0d1b5680027780e 100644 (file)
@@ -9,6 +9,7 @@
  * The Hamming Weight of a number is the total number of bits set in it.
  */
 
+#ifndef __HAVE_ARCH_SW_HWEIGHT
 unsigned int __sw_hweight32(unsigned int w)
 {
 #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
@@ -25,6 +26,7 @@ unsigned int __sw_hweight32(unsigned int w)
 #endif
 }
 EXPORT_SYMBOL(__sw_hweight32);
+#endif
 
 unsigned int __sw_hweight16(unsigned int w)
 {
@@ -43,6 +45,7 @@ unsigned int __sw_hweight8(unsigned int w)
 }
 EXPORT_SYMBOL(__sw_hweight8);
 
+#ifndef __HAVE_ARCH_SW_HWEIGHT
 unsigned long __sw_hweight64(__u64 w)
 {
 #if BITS_PER_LONG == 32
@@ -65,3 +68,4 @@ unsigned long __sw_hweight64(__u64 w)
 #endif
 }
 EXPORT_SYMBOL(__sw_hweight64);
+#endif
index 510d1ce7d4d23287fe866d2fd6649d5d66215fb5..69ed593aab07315d90c4b2dc2e2fcbb9931211fd 100644 (file)
@@ -233,7 +233,6 @@ static void __prandom_timer(unsigned long dontcare)
 
 static void __init __prandom_start_seed_timer(void)
 {
-       set_timer_slack(&seed_timer, HZ);
        seed_timer.expires = jiffies + msecs_to_jiffies(40 * MSEC_PER_SEC);
        add_timer(&seed_timer);
 }
index 79bfe0e06907ac91875dddc9a3fbcf579b0c4d51..7bc04778f84dd6e2b68e4fadec74530f4bc64d57 100644 (file)
@@ -1009,8 +1009,6 @@ static void isolate_freepages(struct compact_control *cc)
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages,
                                isolate_start_pfn = block_start_pfn) {
-               unsigned long isolated;
-
                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
@@ -1034,36 +1032,30 @@ static void isolate_freepages(struct compact_control *cc)
                        continue;
 
                /* Found a block suitable for isolating free pages from. */
-               isolated = isolate_freepages_block(cc, &isolate_start_pfn,
-                                               block_end_pfn, freelist, false);
-               /* If isolation failed early, do not continue needlessly */
-               if (!isolated && isolate_start_pfn < block_end_pfn &&
-                   cc->nr_migratepages > cc->nr_freepages)
-                       break;
+               isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+                                       freelist, false);
 
                /*
-                * If we isolated enough freepages, or aborted due to async
-                * compaction being contended, terminate the loop.
-                * Remember where the free scanner should restart next time,
-                * which is where isolate_freepages_block() left off.
-                * But if it scanned the whole pageblock, isolate_start_pfn
-                * now points at block_end_pfn, which is the start of the next
-                * pageblock.
-                * In that case we will however want to restart at the start
-                * of the previous pageblock.
+                * If we isolated enough freepages, or aborted due to lock
+                * contention, terminate.
                 */
                if ((cc->nr_freepages >= cc->nr_migratepages)
                                                        || cc->contended) {
-                       if (isolate_start_pfn >= block_end_pfn)
+                       if (isolate_start_pfn >= block_end_pfn) {
+                               /*
+                                * Restart at previous pageblock if more
+                                * freepages can be isolated next time.
+                                */
                                isolate_start_pfn =
                                        block_start_pfn - pageblock_nr_pages;
+                       }
                        break;
-               } else {
+               } else if (isolate_start_pfn < block_end_pfn) {
                        /*
-                        * isolate_freepages_block() should not terminate
-                        * prematurely unless contended, or isolated enough
+                        * If isolation failed early, do not continue
+                        * needlessly.
                         */
-                       VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+                       break;
                }
        }
 
index 9ed58530f6957bef1e2e0fa166ed87085dcae523..343a2b7e57aa25f7d02709ff13ab8a5e47c68a87 100644 (file)
@@ -1624,14 +1624,9 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
        if (next - addr != HPAGE_PMD_SIZE) {
                get_page(page);
                spin_unlock(ptl);
-               if (split_huge_page(page)) {
-                       put_page(page);
-                       unlock_page(page);
-                       goto out_unlocked;
-               }
+               split_huge_page(page);
                put_page(page);
                unlock_page(page);
-               ret = 1;
                goto out_unlocked;
        }
 
@@ -2989,7 +2984,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-               unsigned long address, bool freeze)
+               unsigned long address, bool freeze, struct page *page)
 {
        spinlock_t *ptl;
        struct mm_struct *mm = vma->vm_mm;
@@ -2997,8 +2992,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
        mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
        ptl = pmd_lock(mm, pmd);
+
+       /*
+        * If caller asks to setup a migration entries, we need a page to check
+        * pmd against. Otherwise we can end up replacing wrong page.
+        */
+       VM_BUG_ON(freeze && !page);
+       if (page && page != pmd_page(*pmd))
+               goto out;
+
        if (pmd_trans_huge(*pmd)) {
-               struct page *page = pmd_page(*pmd);
+               page = pmd_page(*pmd);
                if (PageMlocked(page))
                        clear_page_mlock(page);
        } else if (!pmd_devmap(*pmd))
@@ -3025,24 +3029,8 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                return;
 
        pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
-               return;
 
-       /*
-        * If caller asks to setup a migration entries, we need a page to check
-        * pmd against. Otherwise we can end up replacing wrong page.
-        */
-       VM_BUG_ON(freeze && !page);
-       if (page && page != pmd_page(*pmd))
-               return;
-
-       /*
-        * Caller holds the mmap_sem write mode or the anon_vma lock,
-        * so a huge pmd cannot materialize from under us (khugepaged
-        * holds both the mmap_sem write mode and the anon_vma lock
-        * write mode).
-        */
-       __split_huge_pmd(vma, pmd, address, freeze);
+       __split_huge_pmd(vma, pmd, address, freeze, page);
 }
 
 void vma_adjust_trans_huge(struct vm_area_struct *vma,
index c1f3c0be150a94f09588672ce44ade9047d78958..addfe4accc076817cdbc4009bb9f44d5e61315d4 100644 (file)
@@ -3383,7 +3383,7 @@ retry_avoidcopy:
        /* If no-one else is actually using this page, avoid the copy
         * and just make the page writable */
        if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
-               page_move_anon_rmap(old_page, vma, address);
+               page_move_anon_rmap(old_page, vma);
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }
index 4973505a9bdde8fda4291debdf21bdadfaa76c3b..65793f150d1f6e1f77d32ad64d37966f0ad9704c 100644 (file)
@@ -238,30 +238,23 @@ static void qlist_move_cache(struct qlist_head *from,
                                   struct qlist_head *to,
                                   struct kmem_cache *cache)
 {
-       struct qlist_node *prev = NULL, *curr;
+       struct qlist_node *curr;
 
        if (unlikely(qlist_empty(from)))
                return;
 
        curr = from->head;
+       qlist_init(from);
        while (curr) {
-               struct qlist_node *qlink = curr;
-               struct kmem_cache *obj_cache = qlink_to_cache(qlink);
-
-               if (obj_cache == cache) {
-                       if (unlikely(from->head == qlink)) {
-                               from->head = curr->next;
-                               prev = curr;
-                       } else
-                               prev->next = curr->next;
-                       if (unlikely(from->tail == qlink))
-                               from->tail = curr->next;
-                       from->bytes -= cache->size;
-                       qlist_put(to, qlink, cache->size);
-               } else {
-                       prev = curr;
-               }
-               curr = curr->next;
+               struct qlist_node *next = curr->next;
+               struct kmem_cache *obj_cache = qlink_to_cache(curr);
+
+               if (obj_cache == cache)
+                       qlist_put(to, curr, obj_cache->size);
+               else
+                       qlist_put(from, curr, obj_cache->size);
+
+               curr = next;
        }
 }
 
index ac8664db38232f5ede345ee4b2f2f9ec0c5ac79d..5339c89dff6317510b2710e9ab2770c23ac71e1e 100644 (file)
@@ -4057,6 +4057,60 @@ static struct cftype mem_cgroup_legacy_files[] = {
        { },    /* terminate */
 };
 
+/*
+ * Private memory cgroup IDR
+ *
+ * Swap-out records and page cache shadow entries need to store memcg
+ * references in constrained space, so we maintain an ID space that is
+ * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
+ * memory-controlled cgroups to 64k.
+ *
+ * However, there usually are many references to the offline CSS after
+ * the cgroup has been destroyed, such as page cache or reclaimable
+ * slab objects, that don't need to hang on to the ID. We want to keep
+ * those dead CSS from occupying IDs, or we might quickly exhaust the
+ * relatively small ID space and prevent the creation of new cgroups
+ * even when there are much fewer than 64k cgroups - possibly none.
+ *
+ * Maintain a private 16-bit ID space for memcg, and allow the ID to
+ * be freed and recycled when it's no longer needed, which is usually
+ * when the CSS is offlined.
+ *
+ * The only exception to that are records of swapped out tmpfs/shmem
+ * pages that need to be attributed to live ancestors on swapin. But
+ * those references are manageable from userspace.
+ */
+
+static DEFINE_IDR(mem_cgroup_idr);
+
+static void mem_cgroup_id_get(struct mem_cgroup *memcg)
+{
+       atomic_inc(&memcg->id.ref);
+}
+
+static void mem_cgroup_id_put(struct mem_cgroup *memcg)
+{
+       if (atomic_dec_and_test(&memcg->id.ref)) {
+               idr_remove(&mem_cgroup_idr, memcg->id.id);
+               memcg->id.id = 0;
+
+               /* Memcg ID pins CSS */
+               css_put(&memcg->css);
+       }
+}
+
+/**
+ * mem_cgroup_from_id - look up a memcg from a memcg id
+ * @id: the memcg id to look up
+ *
+ * Caller must hold rcu_read_lock().
+ */
+struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
+{
+       WARN_ON_ONCE(!rcu_read_lock_held());
+       return idr_find(&mem_cgroup_idr, id);
+}
+
 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 {
        struct mem_cgroup_per_node *pn;
@@ -4116,6 +4170,12 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
        if (!memcg)
                return NULL;
 
+       memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
+                                1, MEM_CGROUP_ID_MAX,
+                                GFP_KERNEL);
+       if (memcg->id.id < 0)
+               goto fail;
+
        memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
        if (!memcg->stat)
                goto fail;
@@ -4142,8 +4202,11 @@ static struct mem_cgroup *mem_cgroup_alloc(void)
 #ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&memcg->cgwb_list);
 #endif
+       idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
        return memcg;
 fail:
+       if (memcg->id.id > 0)
+               idr_remove(&mem_cgroup_idr, memcg->id.id);
        mem_cgroup_free(memcg);
        return NULL;
 }
@@ -4206,12 +4269,11 @@ fail:
        return ERR_PTR(-ENOMEM);
 }
 
-static int
-mem_cgroup_css_online(struct cgroup_subsys_state *css)
+static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
 {
-       if (css->id > MEM_CGROUP_ID_MAX)
-               return -ENOSPC;
-
+       /* Online state pins memcg ID, memcg ID pins CSS */
+       mem_cgroup_id_get(mem_cgroup_from_css(css));
+       css_get(css);
        return 0;
 }
 
@@ -4234,6 +4296,8 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
 
        memcg_offline_kmem(memcg);
        wb_memcg_offline(memcg);
+
+       mem_cgroup_id_put(memcg);
 }
 
 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
@@ -5756,6 +5820,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        if (!memcg)
                return;
 
+       mem_cgroup_id_get(memcg);
        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
        VM_BUG_ON_PAGE(oldid, page);
        mem_cgroup_swap_statistics(memcg, true);
@@ -5774,6 +5839,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        VM_BUG_ON(!irqs_disabled());
        mem_cgroup_charge_statistics(memcg, page, false, -1);
        memcg_check_events(memcg, page);
+
+       if (!mem_cgroup_is_root(memcg))
+               css_put(&memcg->css);
 }
 
 /*
@@ -5804,11 +5872,11 @@ int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
            !page_counter_try_charge(&memcg->swap, 1, &counter))
                return -ENOMEM;
 
+       mem_cgroup_id_get(memcg);
        oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
        VM_BUG_ON_PAGE(oldid, page);
        mem_cgroup_swap_statistics(memcg, true);
 
-       css_get(&memcg->css);
        return 0;
 }
 
@@ -5837,7 +5905,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry)
                                page_counter_uncharge(&memcg->memsw, 1);
                }
                mem_cgroup_swap_statistics(memcg, false);
-               css_put(&memcg->css);
+               mem_cgroup_id_put(memcg);
        }
        rcu_read_unlock();
 }
index cd1f29e4897e3e5207071ed9cccabc55f86e6a87..9e046819e619487ed5a8e3cc65f4131812168692 100644 (file)
@@ -2399,8 +2399,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                 * Protected against the rmap code by
                                 * the page lock.
                                 */
-                               page_move_anon_rmap(compound_head(old_page),
-                                                   vma, address);
+                               page_move_anon_rmap(old_page, vma);
                        }
                        unlock_page(old_page);
                        return wp_page_reuse(mm, vma, address, page_table, ptl,
index de2c1769cc68d6de4744307ee45201a2f2c35cc4..234edffec1d0791251c631fc596b798dc49b7c91 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2943,9 +2943,19 @@ static const char *special_mapping_name(struct vm_area_struct *vma)
        return ((struct vm_special_mapping *)vma->vm_private_data)->name;
 }
 
+static int special_mapping_mremap(struct vm_area_struct *new_vma)
+{
+       struct vm_special_mapping *sm = new_vma->vm_private_data;
+
+       if (sm->mremap)
+               return sm->mremap(sm, new_vma);
+       return 0;
+}
+
 static const struct vm_operations_struct special_mapping_vmops = {
        .close = special_mapping_close,
        .fault = special_mapping_fault,
+       .mremap = special_mapping_mremap,
        .name = special_mapping_name,
 };
 
index 6903b695ebaef81ef890f4b5e41c749d89dce2e1..8b3e1341b7544608cac4777a37bbd424432488e1 100644 (file)
@@ -286,7 +286,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
 /* Returns true if the struct page for the pfn is uninitialised */
 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 {
-       if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+       int nid = early_pfn_to_nid(pfn);
+
+       if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
                return true;
 
        return false;
@@ -1273,7 +1275,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
        spin_lock(&early_pfn_lock);
        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
        if (nid < 0)
-               nid = 0;
+               nid = first_online_node;
        spin_unlock(&early_pfn_lock);
 
        return nid;
index 0ea5d9071b32b967d012f36e600a2ee75acd8f3d..701b93fea2a0677cd8a58e646360747bf5626c5a 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1084,23 +1084,20 @@ EXPORT_SYMBOL_GPL(page_mkclean);
  * page_move_anon_rmap - move a page to our anon_vma
  * @page:      the page to move to our anon_vma
  * @vma:       the vma the page belongs to
- * @address:   the user virtual address mapped
  *
  * When a page belongs exclusively to one process after a COW event,
  * that page can be moved into the anon_vma that belongs to just that
  * process, so the rmap code will not search the parent or sibling
  * processes.
  */
-void page_move_anon_rmap(struct page *page,
-       struct vm_area_struct *vma, unsigned long address)
+void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
 
+       page = compound_head(page);
+
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_VMA(!anon_vma, vma);
-       if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
-               address &= HPAGE_PMD_MASK;
-       VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
        anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
        /*
@@ -1427,7 +1424,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                        goto out;
        }
 
-       pte = page_check_address(page, mm, address, &ptl, 0);
+       pte = page_check_address(page, mm, address, &ptl,
+                                PageTransCompound(page));
        if (!pte)
                goto out;
 
index 24463b67b6efa5817e7c1e806d1bf6337d300ba2..171dee7a131f6959fe444405f280374be7eb25d0 100644 (file)
@@ -2225,9 +2225,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                        error = shmem_getpage(inode, index, &page, SGP_FALLOC);
                if (error) {
                        /* Remove the !PageUptodate pages we added */
-                       shmem_undo_range(inode,
-                               (loff_t)start << PAGE_SHIFT,
-                               ((loff_t)index << PAGE_SHIFT) - 1, true);
+                       if (index > start) {
+                               shmem_undo_range(inode,
+                                   (loff_t)start << PAGE_SHIFT,
+                                   ((loff_t)index << PAGE_SHIFT) - 1, true);
+                       }
                        goto undone;
                }
 
index a65dad7fdcd12495a51eabd91fc76ed96edb0576..82317abb03edc7aa2c89e0a20032fc57a532a723 100644 (file)
@@ -526,8 +526,8 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg,
                goto out_unlock;
 
        cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
-       cache_name = kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name,
-                              css->id, memcg_name_buf);
+       cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
+                              css->serial_nr, memcg_name_buf);
        if (!cache_name)
                goto out_unlock;
 
index 8a75f8d2916af99a1efb6101666038dcd4309c04..577277546d985db41aee898a3c69b1b0488d289b 100644 (file)
@@ -491,7 +491,7 @@ static int __init workingset_init(void)
        max_order = fls_long(totalram_pages - 1);
        if (max_order > timestamp_bits)
                bucket_order = max_order - timestamp_bits;
-       printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+       pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
               timestamp_bits, max_order, bucket_order);
 
        ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
index 86ae75b77390964ee42056c0dd1763650d9fb9ff..516b0e73263c7cac1db1546d05eb6a1d6f642ab5 100644 (file)
@@ -146,10 +146,12 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 
 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
-       /* TODO: gotta make sure the underlying layer can handle it,
-        * maybe an IFF_VLAN_CAPABLE flag for devices?
-        */
-       if (vlan_dev_priv(dev)->real_dev->mtu < new_mtu)
+       struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
+       unsigned int max_mtu = real_dev->mtu;
+
+       if (netif_reduces_vlan_mtu(real_dev))
+               max_mtu -= VLAN_HLEN;
+       if (max_mtu < new_mtu)
                return -ERANGE;
 
        dev->mtu = new_mtu;
index c92b52f37d38de143022f172881dd03f076b0194..1270207f3d7c9dd6fde0e1f7839acfe18a4d82c9 100644 (file)
@@ -118,6 +118,7 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
 {
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        struct net_device *real_dev;
+       unsigned int max_mtu;
        __be16 proto;
        int err;
 
@@ -144,9 +145,11 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,
        if (err < 0)
                return err;
 
+       max_mtu = netif_reduces_vlan_mtu(real_dev) ? real_dev->mtu - VLAN_HLEN :
+                                                    real_dev->mtu;
        if (!tb[IFLA_MTU])
-               dev->mtu = real_dev->mtu;
-       else if (dev->mtu > real_dev->mtu)
+               dev->mtu = max_mtu;
+       else if (dev->mtu > max_mtu)
                return -EINVAL;
 
        err = vlan_changelink(dev, tb, data);
index 748a9ead7ce50fd65a0a09f2b19eaa035a566327..825a5cdf4382d08ff3b90c7499fd605f940b150b 100644 (file)
@@ -177,10 +177,21 @@ static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
 static void batadv_claim_release(struct kref *ref)
 {
        struct batadv_bla_claim *claim;
+       struct batadv_bla_backbone_gw *old_backbone_gw;
 
        claim = container_of(ref, struct batadv_bla_claim, refcount);
 
-       batadv_backbone_gw_put(claim->backbone_gw);
+       spin_lock_bh(&claim->backbone_lock);
+       old_backbone_gw = claim->backbone_gw;
+       claim->backbone_gw = NULL;
+       spin_unlock_bh(&claim->backbone_lock);
+
+       spin_lock_bh(&old_backbone_gw->crc_lock);
+       old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+       spin_unlock_bh(&old_backbone_gw->crc_lock);
+
+       batadv_backbone_gw_put(old_backbone_gw);
+
        kfree_rcu(claim, rcu);
 }
 
@@ -418,9 +429,12 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
                break;
        }
 
-       if (vid & BATADV_VLAN_HAS_TAG)
+       if (vid & BATADV_VLAN_HAS_TAG) {
                skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
                                      vid & VLAN_VID_MASK);
+               if (!skb)
+                       goto out;
+       }
 
        skb_reset_mac_header(skb);
        skb->protocol = eth_type_trans(skb, soft_iface);
@@ -674,8 +688,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid,
                                 struct batadv_bla_backbone_gw *backbone_gw)
 {
+       struct batadv_bla_backbone_gw *old_backbone_gw;
        struct batadv_bla_claim *claim;
        struct batadv_bla_claim search_claim;
+       bool remove_crc = false;
        int hash_added;
 
        ether_addr_copy(search_claim.addr, mac);
@@ -689,8 +705,10 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                        return;
 
                ether_addr_copy(claim->addr, mac);
+               spin_lock_init(&claim->backbone_lock);
                claim->vid = vid;
                claim->lasttime = jiffies;
+               kref_get(&backbone_gw->refcount);
                claim->backbone_gw = backbone_gw;
 
                kref_init(&claim->refcount);
@@ -718,15 +736,26 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
                           "bla_add_claim(): changing ownership for %pM, vid %d\n",
                           mac, BATADV_PRINT_VID(vid));
 
-               spin_lock_bh(&claim->backbone_gw->crc_lock);
-               claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
-               spin_unlock_bh(&claim->backbone_gw->crc_lock);
-               batadv_backbone_gw_put(claim->backbone_gw);
+               remove_crc = true;
        }
-       /* set (new) backbone gw */
+
+       /* replace backbone_gw atomically and adjust reference counters */
+       spin_lock_bh(&claim->backbone_lock);
+       old_backbone_gw = claim->backbone_gw;
        kref_get(&backbone_gw->refcount);
        claim->backbone_gw = backbone_gw;
+       spin_unlock_bh(&claim->backbone_lock);
 
+       if (remove_crc) {
+               /* remove claim address from old backbone_gw */
+               spin_lock_bh(&old_backbone_gw->crc_lock);
+               old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
+               spin_unlock_bh(&old_backbone_gw->crc_lock);
+       }
+
+       batadv_backbone_gw_put(old_backbone_gw);
+
+       /* add claim address to new backbone_gw */
        spin_lock_bh(&backbone_gw->crc_lock);
        backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
        spin_unlock_bh(&backbone_gw->crc_lock);
@@ -736,6 +765,26 @@ claim_free_ref:
        batadv_claim_put(claim);
 }
 
+/**
+ * batadv_bla_claim_get_backbone_gw - Get valid reference for backbone_gw of
+ *  claim
+ * @claim: claim whose backbone_gw should be returned
+ *
+ * Return: valid reference to claim::backbone_gw
+ */
+static struct batadv_bla_backbone_gw *
+batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
+{
+       struct batadv_bla_backbone_gw *backbone_gw;
+
+       spin_lock_bh(&claim->backbone_lock);
+       backbone_gw = claim->backbone_gw;
+       kref_get(&backbone_gw->refcount);
+       spin_unlock_bh(&claim->backbone_lock);
+
+       return backbone_gw;
+}
+
 /**
  * batadv_bla_del_claim - delete a claim from the claim hash
  * @bat_priv: the bat priv with all the soft interface information
@@ -760,10 +809,6 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                           batadv_choose_claim, claim);
        batadv_claim_put(claim); /* reference from the hash is gone */
 
-       spin_lock_bh(&claim->backbone_gw->crc_lock);
-       claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
-       spin_unlock_bh(&claim->backbone_gw->crc_lock);
-
        /* don't need the reference from hash_find() anymore */
        batadv_claim_put(claim);
 }
@@ -1216,6 +1261,7 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    int now)
 {
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_bla_claim *claim;
        struct hlist_head *head;
        struct batadv_hashtable *hash;
@@ -1230,14 +1276,17 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, head, hash_entry) {
+                       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
                        if (now)
                                goto purge_now;
-                       if (!batadv_compare_eth(claim->backbone_gw->orig,
+
+                       if (!batadv_compare_eth(backbone_gw->orig,
                                                primary_if->net_dev->dev_addr))
-                               continue;
+                               goto skip;
+
                        if (!batadv_has_timed_out(claim->lasttime,
                                                  BATADV_BLA_CLAIM_TIMEOUT))
-                               continue;
+                               goto skip;
 
                        batadv_dbg(BATADV_DBG_BLA, bat_priv,
                                   "bla_purge_claims(): %pM, vid %d, time out\n",
@@ -1245,8 +1294,10 @@ static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
 
 purge_now:
                        batadv_handle_unclaim(bat_priv, primary_if,
-                                             claim->backbone_gw->orig,
+                                             backbone_gw->orig,
                                              claim->addr, claim->vid);
+skip:
+                       batadv_backbone_gw_put(backbone_gw);
                }
                rcu_read_unlock();
        }
@@ -1757,9 +1808,11 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
 bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                   unsigned short vid, bool is_bcast)
 {
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
        struct batadv_hard_iface *primary_if;
+       bool own_claim;
        bool ret;
 
        ethhdr = eth_hdr(skb);
@@ -1794,8 +1847,12 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
        }
 
        /* if it is our own claim ... */
-       if (batadv_compare_eth(claim->backbone_gw->orig,
-                              primary_if->net_dev->dev_addr)) {
+       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+       own_claim = batadv_compare_eth(backbone_gw->orig,
+                                      primary_if->net_dev->dev_addr);
+       batadv_backbone_gw_put(backbone_gw);
+
+       if (own_claim) {
                /* ... allow it in any case */
                claim->lasttime = jiffies;
                goto allow;
@@ -1859,7 +1916,9 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
 {
        struct ethhdr *ethhdr;
        struct batadv_bla_claim search_claim, *claim = NULL;
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_hard_iface *primary_if;
+       bool client_roamed;
        bool ret = false;
 
        primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -1889,8 +1948,12 @@ bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                goto allow;
 
        /* check if we are responsible. */
-       if (batadv_compare_eth(claim->backbone_gw->orig,
-                              primary_if->net_dev->dev_addr)) {
+       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+       client_roamed = batadv_compare_eth(backbone_gw->orig,
+                                          primary_if->net_dev->dev_addr);
+       batadv_backbone_gw_put(backbone_gw);
+
+       if (client_roamed) {
                /* if yes, the client has roamed and we have
                 * to unclaim it.
                 */
@@ -1938,6 +2001,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
+       struct batadv_bla_backbone_gw *backbone_gw;
        struct batadv_bla_claim *claim;
        struct batadv_hard_iface *primary_if;
        struct hlist_head *head;
@@ -1962,17 +2026,21 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 
                rcu_read_lock();
                hlist_for_each_entry_rcu(claim, head, hash_entry) {
-                       is_own = batadv_compare_eth(claim->backbone_gw->orig,
+                       backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
+
+                       is_own = batadv_compare_eth(backbone_gw->orig,
                                                    primary_addr);
 
-                       spin_lock_bh(&claim->backbone_gw->crc_lock);
-                       backbone_crc = claim->backbone_gw->crc;
-                       spin_unlock_bh(&claim->backbone_gw->crc_lock);
+                       spin_lock_bh(&backbone_gw->crc_lock);
+                       backbone_crc = backbone_gw->crc;
+                       spin_unlock_bh(&backbone_gw->crc_lock);
                        seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
                                   claim->addr, BATADV_PRINT_VID(claim->vid),
-                                  claim->backbone_gw->orig,
+                                  backbone_gw->orig,
                                   (is_own ? 'x' : ' '),
                                   backbone_crc);
+
+                       batadv_backbone_gw_put(backbone_gw);
                }
                rcu_read_unlock();
        }
index 278800a99c69458c083da635bd39bcd19e788842..aee3b39914717390ed4fe2899a781b6e7625451f 100644 (file)
@@ -1009,9 +1009,12 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                if (!skb_new)
                        goto out;
 
-               if (vid & BATADV_VLAN_HAS_TAG)
+               if (vid & BATADV_VLAN_HAS_TAG) {
                        skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
                                                  vid & VLAN_VID_MASK);
+                       if (!skb_new)
+                               goto out;
+               }
 
                skb_reset_mac_header(skb_new);
                skb_new->protocol = eth_type_trans(skb_new,
@@ -1089,9 +1092,12 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
         */
        skb_reset_mac_header(skb_new);
 
-       if (vid & BATADV_VLAN_HAS_TAG)
+       if (vid & BATADV_VLAN_HAS_TAG) {
                skb_new = vlan_insert_tag(skb_new, htons(ETH_P_8021Q),
                                          vid & VLAN_VID_MASK);
+               if (!skb_new)
+                       goto out;
+       }
 
        /* To preserve backwards compatibility, the node has chosen the outgoing
         * format based on the incoming request packet type. The assumption is
index 7f51bc2c06eb7699492c41f2fe46fe85b26147a2..ab8c4f9738fe2206f878ed9f0dfda8803455ec5b 100644 (file)
@@ -765,6 +765,8 @@ static void batadv_orig_node_release(struct kref *ref)
        struct batadv_neigh_node *neigh_node;
        struct batadv_orig_node *orig_node;
        struct batadv_orig_ifinfo *orig_ifinfo;
+       struct batadv_orig_node_vlan *vlan;
+       struct batadv_orig_ifinfo *last_candidate;
 
        orig_node = container_of(ref, struct batadv_orig_node, refcount);
 
@@ -782,8 +784,21 @@ static void batadv_orig_node_release(struct kref *ref)
                hlist_del_rcu(&orig_ifinfo->list);
                batadv_orig_ifinfo_put(orig_ifinfo);
        }
+
+       last_candidate = orig_node->last_bonding_candidate;
+       orig_node->last_bonding_candidate = NULL;
        spin_unlock_bh(&orig_node->neigh_list_lock);
 
+       if (last_candidate)
+               batadv_orig_ifinfo_put(last_candidate);
+
+       spin_lock_bh(&orig_node->vlan_list_lock);
+       hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
+               hlist_del_rcu(&vlan->list);
+               batadv_orig_node_vlan_put(vlan);
+       }
+       spin_unlock_bh(&orig_node->vlan_list_lock);
+
        /* Free nc_nodes */
        batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
 
index 6c2901a86230ec231a1ee5338c6282da9e20276d..bfac086b4d015053f88dee70e63bebd1ae9b3c56 100644 (file)
@@ -455,6 +455,29 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
        return 0;
 }
 
+/**
+ * batadv_last_bonding_replace - Replace last_bonding_candidate of orig_node
+ * @orig_node: originator node whose bonding candidates should be replaced
+ * @new_candidate: new bonding candidate or NULL
+ */
+static void
+batadv_last_bonding_replace(struct batadv_orig_node *orig_node,
+                           struct batadv_orig_ifinfo *new_candidate)
+{
+       struct batadv_orig_ifinfo *old_candidate;
+
+       spin_lock_bh(&orig_node->neigh_list_lock);
+       old_candidate = orig_node->last_bonding_candidate;
+
+       if (new_candidate)
+               kref_get(&new_candidate->refcount);
+       orig_node->last_bonding_candidate = new_candidate;
+       spin_unlock_bh(&orig_node->neigh_list_lock);
+
+       if (old_candidate)
+               batadv_orig_ifinfo_put(old_candidate);
+}
+
 /**
  * batadv_find_router - find a suitable router for this originator
  * @bat_priv: the bat priv with all the soft interface information
@@ -562,10 +585,6 @@ next:
        }
        rcu_read_unlock();
 
-       /* last_bonding_candidate is reset below, remove the old reference. */
-       if (orig_node->last_bonding_candidate)
-               batadv_orig_ifinfo_put(orig_node->last_bonding_candidate);
-
        /* After finding candidates, handle the three cases:
         * 1) there is a next candidate, use that
         * 2) there is no next candidate, use the first of the list
@@ -574,21 +593,28 @@ next:
        if (next_candidate) {
                batadv_neigh_node_put(router);
 
-               /* remove references to first candidate, we don't need it. */
-               if (first_candidate) {
-                       batadv_neigh_node_put(first_candidate_router);
-                       batadv_orig_ifinfo_put(first_candidate);
-               }
+               kref_get(&next_candidate_router->refcount);
                router = next_candidate_router;
-               orig_node->last_bonding_candidate = next_candidate;
+               batadv_last_bonding_replace(orig_node, next_candidate);
        } else if (first_candidate) {
                batadv_neigh_node_put(router);
 
-               /* refcounting has already been done in the loop above. */
+               kref_get(&first_candidate_router->refcount);
                router = first_candidate_router;
-               orig_node->last_bonding_candidate = first_candidate;
+               batadv_last_bonding_replace(orig_node, first_candidate);
        } else {
-               orig_node->last_bonding_candidate = NULL;
+               batadv_last_bonding_replace(orig_node, NULL);
+       }
+
+       /* cleanup of candidates */
+       if (first_candidate) {
+               batadv_neigh_node_put(first_candidate_router);
+               batadv_orig_ifinfo_put(first_candidate);
+       }
+
+       if (next_candidate) {
+               batadv_neigh_node_put(next_candidate_router);
+               batadv_orig_ifinfo_put(next_candidate);
        }
 
        return router;
index f2f125684ed9c199e1c4f7f7b3b6109b3b9a51c2..010397650fa5b7c2c4e4912dc680440dbaf7e21d 100644 (file)
@@ -424,8 +424,8 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
        struct batadv_orig_node *orig_node;
 
        orig_node = batadv_gw_get_selected_orig(bat_priv);
-       return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
-                                      orig_node, vid);
+       return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST_4ADDR,
+                                      BATADV_P_DATA, orig_node, vid);
 }
 
 void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
index ba846b078af82d9858a733e9253863edb5c6cb44..74d865a4df464f03a54f1d7f9f87ad5a095ad088 100644 (file)
@@ -330,7 +330,9 @@ struct batadv_orig_node {
        DECLARE_BITMAP(bcast_bits, BATADV_TQ_LOCAL_WINDOW_SIZE);
        u32 last_bcast_seqno;
        struct hlist_head neigh_list;
-       /* neigh_list_lock protects: neigh_list and router */
+       /* neigh_list_lock protects: neigh_list, ifinfo_list,
+        * last_bonding_candidate and router
+        */
        spinlock_t neigh_list_lock;
        struct hlist_node hash_entry;
        struct batadv_priv *bat_priv;
@@ -1042,6 +1044,7 @@ struct batadv_bla_backbone_gw {
  * @addr: mac address of claimed non-mesh client
  * @vid: vlan id this client was detected on
  * @backbone_gw: pointer to backbone gw claiming this client
+ * @backbone_lock: lock protecting backbone_gw pointer
  * @lasttime: last time we heard of claim (locals only)
  * @hash_entry: hlist node for batadv_priv_bla::claim_hash
  * @refcount: number of contexts the object is used
@@ -1051,6 +1054,7 @@ struct batadv_bla_claim {
        u8 addr[ETH_ALEN];
        unsigned short vid;
        struct batadv_bla_backbone_gw *backbone_gw;
+       spinlock_t backbone_lock; /* protects backbone_gw */
        unsigned long lasttime;
        struct hlist_node hash_entry;
        struct rcu_head rcu;
index 2d25979273a6f57378da645460d9d6c2a0d91e5c..77e7f69bf80d4ca8e31e09b5b07230bca1abf170 100644 (file)
@@ -700,7 +700,7 @@ static int
 br_nf_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                  int (*output)(struct net *, struct sock *, struct sk_buff *))
 {
-       unsigned int mtu = ip_skb_dst_mtu(skb);
+       unsigned int mtu = ip_skb_dst_mtu(sk, skb);
        struct iphdr *iph = ip_hdr(skb);
 
        if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
index 03062bb763b34cc64eb7c0b38c042801e0be0b1c..7e480bf75bcf5bfd69bd89cdccf29fc082a15e16 100644 (file)
@@ -1260,6 +1260,115 @@ struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
        return map;
 }
 
+/*
+ * Encoding order is (new_up_client, new_state, new_weight).  Need to
+ * apply in the (new_weight, new_state, new_up_client) order, because
+ * an incremental map may look like e.g.
+ *
+ *     new_up_client: { osd=6, addr=... } # set osd_state and addr
+ *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
+ */
+static int decode_new_up_state_weight(void **p, void *end,
+                                     struct ceph_osdmap *map)
+{
+       void *new_up_client;
+       void *new_state;
+       void *new_weight_end;
+       u32 len;
+
+       new_up_client = *p;
+       ceph_decode_32_safe(p, end, len, e_inval);
+       len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
+       ceph_decode_need(p, end, len, e_inval);
+       *p += len;
+
+       new_state = *p;
+       ceph_decode_32_safe(p, end, len, e_inval);
+       len *= sizeof(u32) + sizeof(u8);
+       ceph_decode_need(p, end, len, e_inval);
+       *p += len;
+
+       /* new_weight */
+       ceph_decode_32_safe(p, end, len, e_inval);
+       while (len--) {
+               s32 osd;
+               u32 w;
+
+               ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
+               osd = ceph_decode_32(p);
+               w = ceph_decode_32(p);
+               BUG_ON(osd >= map->max_osd);
+               pr_info("osd%d weight 0x%x %s\n", osd, w,
+                    w == CEPH_OSD_IN ? "(in)" :
+                    (w == CEPH_OSD_OUT ? "(out)" : ""));
+               map->osd_weight[osd] = w;
+
+               /*
+                * If we are marking in, set the EXISTS, and clear the
+                * AUTOOUT and NEW bits.
+                */
+               if (w) {
+                       map->osd_state[osd] |= CEPH_OSD_EXISTS;
+                       map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
+                                                CEPH_OSD_NEW);
+               }
+       }
+       new_weight_end = *p;
+
+       /* new_state (up/down) */
+       *p = new_state;
+       len = ceph_decode_32(p);
+       while (len--) {
+               s32 osd;
+               u8 xorstate;
+               int ret;
+
+               osd = ceph_decode_32(p);
+               xorstate = ceph_decode_8(p);
+               if (xorstate == 0)
+                       xorstate = CEPH_OSD_UP;
+               BUG_ON(osd >= map->max_osd);
+               if ((map->osd_state[osd] & CEPH_OSD_UP) &&
+                   (xorstate & CEPH_OSD_UP))
+                       pr_info("osd%d down\n", osd);
+               if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
+                   (xorstate & CEPH_OSD_EXISTS)) {
+                       pr_info("osd%d does not exist\n", osd);
+                       map->osd_weight[osd] = CEPH_OSD_IN;
+                       ret = set_primary_affinity(map, osd,
+                                                  CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
+                       if (ret)
+                               return ret;
+                       memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
+                       map->osd_state[osd] = 0;
+               } else {
+                       map->osd_state[osd] ^= xorstate;
+               }
+       }
+
+       /* new_up_client */
+       *p = new_up_client;
+       len = ceph_decode_32(p);
+       while (len--) {
+               s32 osd;
+               struct ceph_entity_addr addr;
+
+               osd = ceph_decode_32(p);
+               ceph_decode_copy(p, &addr, sizeof(addr));
+               ceph_decode_addr(&addr);
+               BUG_ON(osd >= map->max_osd);
+               pr_info("osd%d up\n", osd);
+               map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
+               map->osd_addr[osd] = addr;
+       }
+
+       *p = new_weight_end;
+       return 0;
+
+e_inval:
+       return -EINVAL;
+}
+
 /*
  * decode and apply an incremental map update.
  */
@@ -1358,49 +1467,10 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
                        __remove_pg_pool(&map->pg_pools, pi);
        }
 
-       /* new_up */
-       ceph_decode_32_safe(p, end, len, e_inval);
-       while (len--) {
-               u32 osd;
-               struct ceph_entity_addr addr;
-               ceph_decode_32_safe(p, end, osd, e_inval);
-               ceph_decode_copy_safe(p, end, &addr, sizeof(addr), e_inval);
-               ceph_decode_addr(&addr);
-               pr_info("osd%d up\n", osd);
-               BUG_ON(osd >= map->max_osd);
-               map->osd_state[osd] |= CEPH_OSD_UP | CEPH_OSD_EXISTS;
-               map->osd_addr[osd] = addr;
-       }
-
-       /* new_state */
-       ceph_decode_32_safe(p, end, len, e_inval);
-       while (len--) {
-               u32 osd;
-               u8 xorstate;
-               ceph_decode_32_safe(p, end, osd, e_inval);
-               xorstate = **(u8 **)p;
-               (*p)++;  /* clean flag */
-               if (xorstate == 0)
-                       xorstate = CEPH_OSD_UP;
-               if (xorstate & CEPH_OSD_UP)
-                       pr_info("osd%d down\n", osd);
-               if (osd < map->max_osd)
-                       map->osd_state[osd] ^= xorstate;
-       }
-
-       /* new_weight */
-       ceph_decode_32_safe(p, end, len, e_inval);
-       while (len--) {
-               u32 osd, off;
-               ceph_decode_need(p, end, sizeof(u32)*2, e_inval);
-               osd = ceph_decode_32(p);
-               off = ceph_decode_32(p);
-               pr_info("osd%d weight 0x%x %s\n", osd, off,
-                    off == CEPH_OSD_IN ? "(in)" :
-                    (off == CEPH_OSD_OUT ? "(out)" : ""));
-               if (osd < map->max_osd)
-                       map->osd_weight[osd] = off;
-       }
+       /* new_up_client, new_state, new_weight */
+       err = decode_new_up_state_weight(p, end, map);
+       if (err)
+               goto bad;
 
        /* new_pg_temp */
        err = decode_new_pg_temp(p, end, map);
index c4b330c85c02d6bfc1b1ce12dafb42d10b264584..e759d90e8cef037ec117a7e70941c3be050e3103 100644 (file)
 #include <net/sock_reuseport.h>
 
 /**
- *     sk_filter - run a packet through a socket filter
+ *     sk_filter_trim_cap - run a packet through a socket filter
  *     @sk: sock associated with &sk_buff
  *     @skb: buffer to filter
+ *     @cap: limit on how short the eBPF program may trim the packet
  *
  * Run the eBPF program and then cut skb->data to correct size returned by
  * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
@@ -64,7 +65,7 @@
  * be accepted or -EPERM if the packet should be tossed.
  *
  */
-int sk_filter(struct sock *sk, struct sk_buff *skb)
+int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
 {
        int err;
        struct sk_filter *filter;
@@ -85,14 +86,13 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
-
-               err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+               err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
        }
        rcu_read_unlock();
 
        return err;
 }
-EXPORT_SYMBOL(sk_filter);
+EXPORT_SYMBOL(sk_filter_trim_cap);
 
 static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 {
index a669dea146c61b2f7f5b1feaf46512b135ee28f7..61ad43f61c5edbffa48f4983d8bf1a7472a66cdc 100644 (file)
@@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
 }
 EXPORT_SYMBOL(make_flow_keys_digest);
 
+static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
+
+u32 __skb_get_hash_symmetric(struct sk_buff *skb)
+{
+       struct flow_keys keys;
+
+       __flow_hash_secret_init();
+
+       memset(&keys, 0, sizeof(keys));
+       __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
+                          NULL, 0, 0, 0,
+                          FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
+
+       return __flow_hash_from_keys(&keys, hashrnd);
+}
+EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
+
 /**
  * __skb_get_hash: calculate a flow hash
  * @skb: sk_buff to calculate flow hash from
@@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
        },
 };
 
+static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
+       {
+               .key_id = FLOW_DISSECTOR_KEY_CONTROL,
+               .offset = offsetof(struct flow_keys, control),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_BASIC,
+               .offset = offsetof(struct flow_keys, basic),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.v4addrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+               .offset = offsetof(struct flow_keys, addrs.v6addrs),
+       },
+       {
+               .key_id = FLOW_DISSECTOR_KEY_PORTS,
+               .offset = offsetof(struct flow_keys, ports),
+       },
+};
+
 static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
        {
                .key_id = FLOW_DISSECTOR_KEY_CONTROL,
@@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void)
        skb_flow_dissector_init(&flow_keys_dissector,
                                flow_keys_dissector_keys,
                                ARRAY_SIZE(flow_keys_dissector_keys));
+       skb_flow_dissector_init(&flow_keys_dissector_symmetric,
+                               flow_keys_dissector_symmetric_keys,
+                               ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
        skb_flow_dissector_init(&flow_keys_buf_dissector,
                                flow_keys_buf_dissector_keys,
                                ARRAY_SIZE(flow_keys_buf_dissector_keys));
index f2b77e549c03a771909cd9c87c40ec2b7826cd31..eb12d2161fb2b5f9f26ebb8f534bbd6a673b65b1 100644 (file)
@@ -3015,24 +3015,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
 }
 EXPORT_SYMBOL_GPL(skb_append_pagefrags);
 
-/**
- *     skb_push_rcsum - push skb and update receive checksum
- *     @skb: buffer to update
- *     @len: length of data pulled
- *
- *     This function performs an skb_push on the packet and updates
- *     the CHECKSUM_COMPLETE checksum.  It should be used on
- *     receive path processing instead of skb_push unless you know
- *     that the checksum difference is zero (e.g., a valid IP header)
- *     or you are setting ip_summed to CHECKSUM_NONE.
- */
-static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
-{
-       skb_push(skb, len);
-       skb_postpush_rcsum(skb, skb->data, len);
-       return skb->data;
-}
-
 /**
  *     skb_pull_rcsum - pull skb and update receive checksum
  *     @skb: buffer to update
index 08bf97eceeb3827d0b237d8c01910e5e0a0f5d6a..25dab8b60223e25ee92dae45a226fce2a6bb5a03 100644 (file)
@@ -452,11 +452,12 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_queue_rcv_skb);
 
-int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+                    const int nested, unsigned int trim_cap)
 {
        int rc = NET_RX_SUCCESS;
 
-       if (sk_filter(sk, skb))
+       if (sk_filter_trim_cap(sk, skb, trim_cap))
                goto discard_and_relse;
 
        skb->dev = NULL;
@@ -492,7 +493,7 @@ discard_and_relse:
        kfree_skb(skb);
        goto out;
 }
-EXPORT_SYMBOL(sk_receive_skb);
+EXPORT_SYMBOL(__sk_receive_skb);
 
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 {
@@ -1938,6 +1939,10 @@ int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
                sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK;
                sockc->tsflags |= tsflags;
                break;
+       /* SCM_RIGHTS and SCM_CREDENTIALS are semantically in SOL_UNIX. */
+       case SCM_RIGHTS:
+       case SCM_CREDENTIALS:
+               break;
        default:
                return -EINVAL;
        }
index 5c7e413a3ae407e67565b48a8bd6f43e3b02de4d..345a3aeb8c7e36449a765298cd6512eab8cfef4b 100644 (file)
@@ -462,7 +462,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
        if (IS_ERR(rt)) {
-               __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
+               IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
                return NULL;
        }
 
@@ -527,17 +527,19 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
                                                                 rxiph->daddr);
        skb_dst_set(skb, dst_clone(dst));
 
+       local_bh_disable();
        bh_lock_sock(ctl_sk);
        err = ip_build_and_send_pkt(skb, ctl_sk,
                                    rxiph->daddr, rxiph->saddr, NULL);
        bh_unlock_sock(ctl_sk);
 
        if (net_xmit_eval(err) == 0) {
-               DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
-               DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
+               __DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
+               __DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
        }
+       local_bh_enable();
 out:
-        dst_release(dst);
+       dst_release(dst);
 }
 
 static void dccp_v4_reqsk_destructor(struct request_sock *req)
@@ -866,7 +868,7 @@ lookup:
                goto discard_and_relse;
        nf_reset(skb);
 
-       return sk_receive_skb(sk, skb, 1);
+       return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4);
 
 no_dccp_socket:
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
index d176f4e66369a399f5fe8a440eb864dbac9c7542..3ff137d9471d8420748fd72c8631d67eb57ffb66 100644 (file)
@@ -732,7 +732,7 @@ lookup:
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
 
-       return sk_receive_skb(sk, skb, 1) ? -1 : 0;
+       return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0;
 
 no_dccp_socket:
        if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
index df48034378889eac83c6b262a76a32028202efb4..a796fc7cbc3542972eaa19bbc3d3dc07a428ff1c 100644 (file)
@@ -41,6 +41,7 @@
 #include <net/dn_fib.h>
 #include <net/dn_neigh.h>
 #include <net/dn_dev.h>
+#include <net/nexthop.h>
 
 #define RT_MIN_TABLE 1
 
@@ -150,14 +151,13 @@ static int dn_fib_count_nhs(const struct nlattr *attr)
        struct rtnexthop *nhp = nla_data(attr);
        int nhs = 0, nhlen = nla_len(attr);
 
-       while(nhlen >= (int)sizeof(struct rtnexthop)) {
-               if ((nhlen -= nhp->rtnh_len) < 0)
-                       return 0;
+       while (rtnh_ok(nhp, nhlen)) {
                nhs++;
-               nhp = RTNH_NEXT(nhp);
+               nhp = rtnh_next(nhp, &nhlen);
        }
 
-       return nhs;
+       /* leftover implies invalid nexthop configuration, discard it */
+       return nhlen > 0 ? 0 : nhs;
 }
 
 static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
@@ -167,21 +167,24 @@ static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
        int nhlen = nla_len(attr);
 
        change_nexthops(fi) {
-               int attrlen = nhlen - sizeof(struct rtnexthop);
-               if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
+               int attrlen;
+
+               if (!rtnh_ok(nhp, nhlen))
                        return -EINVAL;
 
                nh->nh_flags  = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
                nh->nh_oif    = nhp->rtnh_ifindex;
                nh->nh_weight = nhp->rtnh_hops + 1;
 
-               if (attrlen) {
+               attrlen = rtnh_attrlen(nhp);
+               if (attrlen > 0) {
                        struct nlattr *gw_attr;
 
                        gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
                        nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
                }
-               nhp = RTNH_NEXT(nhp);
+
+               nhp = rtnh_next(nhp, &nhlen);
        } endfor_nexthops(fi);
 
        return 0;
index d09173bf95005be87d4d0cbcc754147b3c8e62c9..539fa264e67d71148364c9fc0e694c78fd35e69b 100644 (file)
@@ -479,6 +479,9 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
                if (!rtnh_ok(rtnh, remaining))
                        return -EINVAL;
 
+               if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+                       return -EINVAL;
+
                nexthop_nh->nh_flags =
                        (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
                nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
@@ -1003,6 +1006,9 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
        if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
                goto err_inval;
 
+       if (cfg->fc_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN))
+               goto err_inval;
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (cfg->fc_mp) {
                nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
index fa8c39804bdbae867dd5c08f1e308202c1aa1c52..61a9deec299332aa431c13ad15197974306ff117 100644 (file)
@@ -603,7 +603,7 @@ static void reqsk_timer_handler(unsigned long data)
                if (req->num_timeout++ == 0)
                        atomic_dec(&queue->young);
                timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
-               mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
+               mod_timer(&req->rsk_timer, jiffies + timeo);
                return;
        }
 drop:
@@ -617,8 +617,9 @@ static void reqsk_queue_hash_req(struct request_sock *req,
        req->num_timeout = 0;
        req->sk = NULL;
 
-       setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
-       mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+       setup_pinned_timer(&req->rsk_timer, reqsk_timer_handler,
+                           (unsigned long)req);
+       mod_timer(&req->rsk_timer, jiffies + timeout);
 
        inet_ehash_insert(req_to_sk(req), NULL);
        /* before letting lookups find us, make sure all req fields
index 2065816748066986f0356df168c2d76fe2d53d85..ddcd56c08d14d37cec17719bb36be234a28dc35e 100644 (file)
@@ -188,7 +188,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
                tw->tw_prot         = sk->sk_prot_creator;
                atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
                twsk_net_set(tw, sock_net(sk));
-               setup_timer(&tw->tw_timer, tw_timer_handler, (unsigned long)tw);
+               setup_pinned_timer(&tw->tw_timer, tw_timer_handler,
+                                  (unsigned long)tw);
                /*
                 * Because we use RCU lookups, we should not set tw_refcnt
                 * to a non null value before everything is setup for this
@@ -248,7 +249,7 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 
        tw->tw_kill = timeo <= 4*HZ;
        if (!rearm) {
-               BUG_ON(mod_timer_pinned(&tw->tw_timer, jiffies + timeo));
+               BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
                atomic_inc(&tw->tw_dr->tw_count);
        } else {
                mod_timer_pending(&tw->tw_timer, jiffies + timeo);
index 124bf0a663283502deb03397343160d493a378b1..4bd4921639c3e6415f8899896f72fe1564a68c55 100644 (file)
@@ -271,7 +271,7 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
                return dst_output(net, sk, skb);
        }
 #endif
-       mtu = ip_skb_dst_mtu(skb);
+       mtu = ip_skb_dst_mtu(sk, skb);
        if (skb_is_gso(skb))
                return ip_finish_output_gso(net, sk, skb, mtu);
 
@@ -541,7 +541,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 
        iph = ip_hdr(skb);
 
-       mtu = ip_skb_dst_mtu(skb);
+       mtu = ip_skb_dst_mtu(sk, skb);
        if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
                mtu = IPCB(skb)->frag_max_size;
 
index d6c8f4cd080001a527f7c137021cc6a3f3604344..42bf89aaf6a5206cf384068a0a50a3130abe2a4e 100644 (file)
@@ -87,7 +87,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
 EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
 
 /* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
 
 int sysctl_tcp_stdurg __read_mostly;
 int sysctl_tcp_rfc1337 __read_mostly;
@@ -3421,6 +3421,23 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
        return flag;
 }
 
+static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
+                                  u32 *last_oow_ack_time)
+{
+       if (*last_oow_ack_time) {
+               s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
+
+               if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
+                       NET_INC_STATS(net, mib_idx);
+                       return true;    /* rate-limited: don't send yet! */
+               }
+       }
+
+       *last_oow_ack_time = tcp_time_stamp;
+
+       return false;   /* not rate-limited: go ahead, send dupack now! */
+}
+
 /* Return true if we're currently rate-limiting out-of-window ACKs and
  * thus shouldn't send a dupack right now. We rate-limit dupacks in
  * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
@@ -3434,21 +3451,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
        /* Data packets without SYNs are not likely part of an ACK loop. */
        if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
            !tcp_hdr(skb)->syn)
-               goto not_rate_limited;
-
-       if (*last_oow_ack_time) {
-               s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
-
-               if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-                       NET_INC_STATS(net, mib_idx);
-                       return true;    /* rate-limited: don't send yet! */
-               }
-       }
-
-       *last_oow_ack_time = tcp_time_stamp;
+               return false;
 
-not_rate_limited:
-       return false;   /* not rate-limited: go ahead, send dupack now! */
+       return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
 }
 
 /* RFC 5961 7 [ACK Throttling] */
@@ -3458,21 +3463,26 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
        static u32 challenge_timestamp;
        static unsigned int challenge_count;
        struct tcp_sock *tp = tcp_sk(sk);
-       u32 now;
+       u32 count, now;
 
        /* First check our per-socket dupack rate limit. */
-       if (tcp_oow_rate_limited(sock_net(sk), skb,
-                                LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
-                                &tp->last_oow_ack_time))
+       if (__tcp_oow_rate_limited(sock_net(sk),
+                                  LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
+                                  &tp->last_oow_ack_time))
                return;
 
-       /* Then check the check host-wide RFC 5961 rate limit. */
+       /* Then check host-wide RFC 5961 rate limit. */
        now = jiffies / HZ;
        if (now != challenge_timestamp) {
+               u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
                challenge_timestamp = now;
-               challenge_count = 0;
+               WRITE_ONCE(challenge_count, half +
+                          prandom_u32_max(sysctl_tcp_challenge_ack_limit));
        }
-       if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+       count = READ_ONCE(challenge_count);
+       if (count > 0) {
+               WRITE_ONCE(challenge_count, count - 1);
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
                tcp_send_ack(sk);
        }
index ca5e8ea29538569c92f69c3ba69a6f9bf33cf8e4..4aed8fc23d328592f8cf267fca70582f62fc6a3e 100644 (file)
@@ -1583,6 +1583,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        if (sk_filter(sk, skb))
                goto drop;
+       if (unlikely(skb->len < sizeof(struct udphdr)))
+               goto drop;
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
index 1bcef2369d64e6f1325dcab50c14601e6ca5a40a..771be1fa41764aa8ea3b570a058ee84b109903b9 100644 (file)
@@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
                }
        }
 
+       free_percpu(non_pcpu_rt->rt6i_pcpu);
        non_pcpu_rt->rt6i_pcpu = NULL;
 }
 
index 005dc82c2138e036d13934356da25a63e3d7b4ba..acc09705618b4cccd622122f58ce4ab1639f22d1 100644 (file)
@@ -620,6 +620,8 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
        if (sk_filter(sk, skb))
                goto drop;
+       if (unlikely(skb->len < sizeof(struct udphdr)))
+               goto drop;
 
        udp_csum_pull_header(skb);
        if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
index d4fdf8f7b47179b39e475dca9833ee6a6c25c4ca..8f5678cb62635cc5c76bba711091929d6054939e 100644 (file)
@@ -246,9 +246,6 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
 {
        struct serial_struct info;
 
-       if (!retinfo)
-               return -EFAULT;
-
        memset(&info, 0, sizeof(info));
        info.line = self->line;
        info.flags = self->port.flags;
@@ -258,11 +255,6 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self,
 
        /* For compatibility  */
        info.type = PORT_16550A;
-       info.port = 0;
-       info.irq = 0;
-       info.xmit_fifo_size = 0;
-       info.hub6 = 0;
-       info.custom_divisor = 0;
 
        if (copy_to_user(retinfo, &info, sizeof(*retinfo)))
                return -EFAULT;
index 803001a45aa16e6b5a372ab385dba8e9c09bd2f0..1b07578bedf336c53e3b6072c8c3324f7f18081b 100644 (file)
@@ -1545,7 +1545,8 @@ error:
 /*
  *      Set up receiving multicast socket over UDP
  */
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
+static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
+                                       int ifindex)
 {
        /* multicast addr */
        union ipvs_sockaddr mcast_addr;
@@ -1566,6 +1567,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id)
                set_sock_size(sock->sk, 0, result);
 
        get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
+       sock->sk->sk_bound_dev_if = ifindex;
        result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
        if (result < 0) {
                pr_err("Error binding to the multicast addr\n");
@@ -1868,7 +1870,7 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
                if (state == IP_VS_STATE_MASTER)
                        sock = make_send_sock(ipvs, id);
                else
-                       sock = make_receive_sock(ipvs, id);
+                       sock = make_receive_sock(ipvs, id, dev->ifindex);
                if (IS_ERR(sock)) {
                        result = PTR_ERR(sock);
                        goto outtinfo;
index f204274a9b6b9dfaa0bc66d69c15d85255a87a17..4cbda4bd89266e3d8b5e571aa2023dbf32941e9d 100644 (file)
@@ -83,6 +83,13 @@ void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
        spin_lock(lock);
        while (unlikely(nf_conntrack_locks_all)) {
                spin_unlock(lock);
+
+               /*
+                * Order the 'nf_conntrack_locks_all' load vs. the
+                * spin_unlock_wait() loads below, to ensure
+                * that 'nf_conntrack_locks_all_lock' is indeed held:
+                */
+               smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
                spin_unlock_wait(&nf_conntrack_locks_all_lock);
                spin_lock(lock);
        }
@@ -128,6 +135,14 @@ static void nf_conntrack_all_lock(void)
        spin_lock(&nf_conntrack_locks_all_lock);
        nf_conntrack_locks_all = true;
 
+       /*
+        * Order the above store of 'nf_conntrack_locks_all' against
+        * the spin_unlock_wait() loads below, such that if
+        * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
+        * we must observe nf_conntrack_locks[] held:
+        */
+       smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
+
        for (i = 0; i < CONNTRACK_LOCKS; i++) {
                spin_unlock_wait(&nf_conntrack_locks[i]);
        }
@@ -135,7 +150,13 @@ static void nf_conntrack_all_lock(void)
 
 static void nf_conntrack_all_unlock(void)
 {
-       nf_conntrack_locks_all = false;
+       /*
+        * All prior stores must be complete before we clear
+        * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
+        * might observe the false value but not the entire
+        * critical section:
+        */
+       smp_store_release(&nf_conntrack_locks_all, false);
        spin_unlock(&nf_conntrack_locks_all_lock);
 }
 
@@ -646,6 +667,7 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
 
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->allow_clash &&
+           !nfct_nat(ct) &&
            !nf_ct_is_dying(ct) &&
            atomic_inc_not_zero(&ct->ct_general.use)) {
                nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
@@ -1601,8 +1623,15 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
        unsigned int nr_slots, i;
        size_t sz;
 
+       if (*sizep > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+               return NULL;
+
        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
+
+       if (nr_slots > (UINT_MAX / sizeof(struct hlist_nulls_head)))
+               return NULL;
+
        sz = nr_slots * sizeof(struct hlist_nulls_head);
        hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
index 2c881871db381c1142cb3a0591850325b75fa619..cf7c74599cbe5e6b800b584bee75d60f62d84a73 100644 (file)
@@ -1724,9 +1724,11 @@ struct nft_expr *nft_expr_init(const struct nft_ctx *ctx,
 
        err = nf_tables_newexpr(ctx, &info, expr);
        if (err < 0)
-               goto err2;
+               goto err3;
 
        return expr;
+err3:
+       kfree(expr);
 err2:
        module_put(info.ops->type->owner);
 err1:
index 137e308d5b24c0865336da92e97686d8fef0b9d1..81fbb450783e59932ecc1c67147fc0ca89b475d3 100644 (file)
@@ -54,7 +54,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
        const struct nf_conn_help *help;
        const struct nf_conntrack_tuple *tuple;
        const struct nf_conntrack_helper *helper;
-       long diff;
        unsigned int state;
 
        ct = nf_ct_get(pkt->skb, &ctinfo);
@@ -94,10 +93,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                return;
 #endif
        case NFT_CT_EXPIRATION:
-               diff = (long)jiffies - (long)ct->timeout.expires;
-               if (diff < 0)
-                       diff = 0;
-               *dest = jiffies_to_msecs(diff);
+               *dest = jiffies_to_msecs(nf_ct_expires(ct));
                return;
        case NFT_CT_HELPER:
                if (ct->master == NULL)
index 16c50b0dd426840f79bda0bb3ebbd28ecb2845e5..f4bad9dc15c48b0d8635632dc50179457b5c49c1 100644 (file)
@@ -227,7 +227,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
                        skb->pkt_type = value;
                break;
        case NFT_META_NFTRACE:
-               skb->nf_trace = 1;
+               skb->nf_trace = !!value;
                break;
        default:
                WARN_ON(1);
index 9bff6ef16fa7632fcfc05f23dd696d75ead6d5e8..b43c4015b2f79678bb5a2edffdd4e57cf9bdb880 100644 (file)
@@ -1341,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
                                      struct sk_buff *skb,
                                      unsigned int num)
 {
-       return reciprocal_scale(skb_get_hash(skb), num);
+       return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
 }
 
 static unsigned int fanout_demux_lb(struct packet_fanout *f,
@@ -1927,13 +1927,11 @@ retry:
                goto out_unlock;
        }
 
-       sockc.tsflags = 0;
+       sockc.tsflags = sk->sk_tsflags;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
-               if (unlikely(err)) {
-                       err = -EINVAL;
+               if (unlikely(err))
                        goto out_unlock;
-               }
        }
 
        skb->protocol = proto;
@@ -2678,7 +2676,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
        }
 
-       sockc.tsflags = 0;
+       sockc.tsflags = po->sk.sk_tsflags;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(&po->sk, msg, &sockc);
                if (unlikely(err))
@@ -2881,7 +2879,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        if (unlikely(!(dev->flags & IFF_UP)))
                goto out_unlock;
 
-       sockc.tsflags = 0;
+       sockc.tsflags = sk->sk_tsflags;
        sockc.mark = sk->sk_mark;
        if (msg->msg_controllen) {
                err = sock_cmsg_send(sk, msg, &sockc);
index 74ee126a6fe6c00ce8bce45053ec2b8805fc222e..c8a7b4c90190cafe8dc6fefe3e0a1935c558c357 100644 (file)
@@ -616,7 +616,7 @@ static int rds_tcp_init(void)
 
        ret = rds_tcp_recv_init();
        if (ret)
-               goto out_slab;
+               goto out_pernet;
 
        ret = rds_trans_register(&rds_tcp_transport);
        if (ret)
@@ -628,8 +628,9 @@ static int rds_tcp_init(void)
 
 out_recv:
        rds_tcp_recv_exit();
-out_slab:
+out_pernet:
        unregister_pernet_subsys(&rds_tcp_net_ops);
+out_slab:
        kmem_cache_destroy(rds_tcp_conn_slab);
 out:
        return ret;
index 79c4abcfa6b4ee86fbd92358f5585f88b74319f5..0a6394754e81db5469906b92f4b40046444bdb42 100644 (file)
@@ -164,7 +164,8 @@ static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int framety
                rose_frames_acked(sk, nr);
                if (ns == rose->vr) {
                        rose_start_idletimer(sk);
-                       if (sock_queue_rcv_skb(sk, skb) == 0) {
+                       if (sk_filter_trim_cap(sk, skb, ROSE_MIN_LEN) == 0 &&
+                           __sock_queue_rcv_skb(sk, skb) == 0) {
                                rose->vr = (rose->vr + 1) % ROSE_MODULUS;
                                queued = 1;
                        } else {
index 128942bc9e42e82ed11dad14448b69cd6858e541..1f5bd6ccbd2c6162f328aa209e2140bad99f1e3f 100644 (file)
@@ -181,7 +181,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
 
        if (!(at & AT_EGRESS)) {
                if (m->tcfm_ok_push)
-                       skb_push(skb2, skb->mac_len);
+                       skb_push_rcsum(skb2, skb->mac_len);
        }
 
        /* mirror is always swallowed */
index 62f9d8100c6eebc3ca60510e8f854b7e7e6c7a5f..052f84d6cc236176b64adbd2c2c3c7f38b18cb43 100644 (file)
@@ -1140,8 +1140,10 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 
        if (!cl->level && cl->un.leaf.q)
                qlen = cl->un.leaf.q->q.qlen;
-       cl->xstats.tokens = PSCHED_NS2TICKS(cl->tokens);
-       cl->xstats.ctokens = PSCHED_NS2TICKS(cl->ctokens);
+       cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
+                                   INT_MIN, INT_MAX);
+       cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
+                                    INT_MIN, INT_MAX);
 
        if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
            gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
index a701527a9480faff1b8d91257e1dbf3c0f09ed68..47cf4604d19c23a4bf392c87d06de3e44680d5c0 100644 (file)
@@ -112,7 +112,6 @@ int sctp_rcv(struct sk_buff *skb)
        struct sctp_ep_common *rcvr;
        struct sctp_transport *transport = NULL;
        struct sctp_chunk *chunk;
-       struct sctphdr *sh;
        union sctp_addr src;
        union sctp_addr dest;
        int family;
@@ -127,8 +126,6 @@ int sctp_rcv(struct sk_buff *skb)
        if (skb_linearize(skb))
                goto discard_it;
 
-       sh = sctp_hdr(skb);
-
        /* Pull up the IP and SCTP headers. */
        __skb_pull(skb, skb_transport_offset(skb));
        if (skb->len < sizeof(struct sctphdr))
@@ -230,7 +227,7 @@ int sctp_rcv(struct sk_buff *skb)
        chunk->rcvr = rcvr;
 
        /* Remember the SCTP header. */
-       chunk->sctp_hdr = sh;
+       chunk->sctp_hdr = sctp_hdr(skb);
 
        /* Set the source and destination addresses of the incoming chunk.  */
        sctp_init_addrs(chunk, &src, &dest);
index bf8f05c3eb82aad367d77a0a7016dee4331d00dd..a597708ae3818b25f9eaee8f1c3584865432a37d 100644 (file)
@@ -330,6 +330,21 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
        return 0;
 }
 
+/* tipc_bearer_reset_all - reset all links on all bearers
+ */
+void tipc_bearer_reset_all(struct net *net)
+{
+       struct tipc_net *tn = tipc_net(net);
+       struct tipc_bearer *b;
+       int i;
+
+       for (i = 0; i < MAX_BEARERS; i++) {
+               b = rcu_dereference_rtnl(tn->bearer_list[i]);
+               if (b)
+                       tipc_reset_bearer(net, b);
+       }
+}
+
 /**
  * bearer_disable
  *
index f686e41b5abb880cc09f9c1a707356047d32749f..60e49c3be19c1e44b44175926b10c6aecb64fa58 100644 (file)
@@ -198,6 +198,7 @@ void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
 void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
 struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
 struct tipc_media *tipc_media_find(const char *name);
+void tipc_bearer_reset_all(struct net *net);
 int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
index 67b6ab9f4c8dfa01cb2f75632d8aa23e7f6235aa..7d89f8713d4984f3a9d732c9cabc56d410219ad8 100644 (file)
@@ -349,6 +349,8 @@ void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
        u16 ack = snd_l->snd_nxt - 1;
 
        snd_l->ackers--;
+       rcv_l->bc_peer_is_up = true;
+       rcv_l->state = LINK_ESTABLISHED;
        tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
        tipc_link_reset(rcv_l);
        rcv_l->state = LINK_RESET;
@@ -1559,7 +1561,12 @@ void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
        if (!msg_peer_node_is_up(hdr))
                return;
 
-       l->bc_peer_is_up = true;
+       /* Open when peer ackowledges our bcast init msg (pkt #1) */
+       if (msg_ack(hdr))
+               l->bc_peer_is_up = true;
+
+       if (!l->bc_peer_is_up)
+               return;
 
        /* Ignore if peers_snd_nxt goes beyond receive window */
        if (more(peers_snd_nxt, l->rcv_nxt + l->window))
index 3ad9fab1985f1cdca35c876f0f228d28b9b5db00..1fd4647647650b75f17f41d19d288be7abe436a3 100644 (file)
@@ -604,7 +604,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
 
        link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
        link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
-       nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]),
+       nla_strlcpy(link_info.str, link[TIPC_NLA_LINK_NAME],
                    TIPC_MAX_LINK_NAME);
 
        return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
index e01e2c71b5a16fde975b6e87bd134ae57cd57326..23d4761842a0ed9ac62eb1af5cc03259aadeb22d 100644 (file)
@@ -1297,10 +1297,6 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
 
        rc = tipc_bcast_rcv(net, be->link, skb);
 
-       /* Broadcast link reset may happen at reassembly failure */
-       if (rc & TIPC_LINK_DOWN_EVT)
-               tipc_node_reset_links(n);
-
        /* Broadcast ACKs are sent on a unicast link */
        if (rc & TIPC_LINK_SND_BC_ACK) {
                tipc_node_read_lock(n);
@@ -1320,6 +1316,17 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
                spin_unlock_bh(&be->inputq2.lock);
                tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
        }
+
+       if (rc & TIPC_LINK_DOWN_EVT) {
+               /* Reception reassembly failure => reset all links to peer */
+               if (!tipc_link_is_up(be->link))
+                       tipc_node_reset_links(n);
+
+               /* Retransmission failure => reset all links to all peers */
+               if (!tipc_link_is_up(tipc_bc_sndlink(net)))
+                       tipc_bearer_reset_all(net);
+       }
+
        tipc_node_put(n);
 }
 
index d7599014055dfc1c3d256d917e3a3645f1a27143..7d72283901a3bb8fa7d3294e5dcdfbc6cd61e3a2 100644 (file)
@@ -3487,16 +3487,16 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                params.smps_mode = NL80211_SMPS_OFF;
        }
 
+       params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+       if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
+               return -EOPNOTSUPP;
+
        if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
                params.acl = parse_acl_data(&rdev->wiphy, info);
                if (IS_ERR(params.acl))
                        return PTR_ERR(params.acl);
        }
 
-       params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
-       if (params.pbss && !rdev->wiphy.bands[NL80211_BAND_60GHZ])
-               return -EOPNOTSUPP;
-
        wdev_lock(wdev);
        err = rdev_start_ap(rdev, dev, &params);
        if (!err) {
index 2443ee30ba5b6b0b2844af1cd28d7aa986e44bbf..b7d1592bd5b8939f50d01ee2ce11a95c0df0e93b 100644 (file)
@@ -721,6 +721,8 @@ __ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
         * alignment since sizeof(struct ethhdr) is 14.
         */
        frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + cur_len);
+       if (!frame)
+               return NULL;
 
        skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
        skb_copy_bits(skb, offset, skb_put(frame, cur_len), cur_len);
index 045e0098e96244be631ae8272c1d94968da3d7e9..e4d017d5381949243337b78c56284bb52a43c610 100644 (file)
@@ -13,11 +13,26 @@ include scripts/Kbuild.include
 # Create output directory if not already present
 _dummy := $(shell [ -d $(obj) ] || mkdir -p $(obj))
 
+# Stale wrappers when the corresponding files are removed from generic-y
+# need removing.
+generated-y   := $(generic-y) $(genhdr-y) $(generated-y)
+all-files     := $(patsubst %, $(obj)/%, $(generated-y))
+old-headers   := $(wildcard $(obj)/*.h)
+unwanted      := $(filter-out $(all-files),$(old-headers))
+
 quiet_cmd_wrap = WRAP    $@
 cmd_wrap = echo "\#include <asm-generic/$*.h>" >$@
 
-all: $(patsubst %, $(obj)/%, $(generic-y))
+quiet_cmd_remove = REMOVE  $(unwanted)
+cmd_remove = rm -f $(unwanted)
+
+all: $(patsubst %, $(obj)/%, $(generic-y)) FORCE
+       $(if $(unwanted),$(call cmd,remove),)
        @:
 
 $(obj)/%.h:
        $(call cmd,wrap)
+
+PHONY += FORCE
+.PHONY: $(PHONY)
+FORCE: ;
index 52e4e61140d1226f5ba80676d1973baa298d3b23..2573543842d06947cf081ff3cf752c8d309f094b 100644 (file)
@@ -1,2 +1,3 @@
 *.pyc
 *.pyo
+constants.py
index cd129e65d1ffdbbb06e6f1096340e00f1cff8d9e..8b00031f53497035b68bfe65bd682989430fe016 100644 (file)
@@ -13,9 +13,11 @@ quiet_cmd_gen_constants_py = GEN     $@
        $(CPP) -E -x c -P $(c_flags) $< > $@ ;\
        sed -i '1,/<!-- end-c-headers -->/d;' $@
 
-$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in
-       $(call if_changed,gen_constants_py)
+targets += constants.py
+$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in FORCE
+       $(call if_changed_dep,gen_constants_py)
 
 build_constants_py: $(obj)/constants.py
+       @:
 
 clean-files := *.pyc *.pyo $(if $(KBUILD_SRC),*.py) $(obj)/constants.py
index 07e6c2befe368665ed7b35bd4fa9f08e1600d4dc..7986f4e0da123a240eeca854666dd3cfac86d69b 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <linux/fs.h>
 #include <linux/mount.h>
-#include <linux/radix-tree.h>
 
 /* We need to stringify expanded macros so that they can be parsed */
 
@@ -51,9 +50,3 @@ LX_VALUE(MNT_NOEXEC)
 LX_VALUE(MNT_NOATIME)
 LX_VALUE(MNT_NODIRATIME)
 LX_VALUE(MNT_RELATIME)
-
-/* linux/radix-tree.h */
-LX_VALUE(RADIX_TREE_INDIRECT_PTR)
-LX_GDBPARSED(RADIX_TREE_HEIGHT_MASK)
-LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
-LX_GDBPARSED(RADIX_TREE_MAP_MASK)
diff --git a/scripts/gdb/linux/radixtree.py b/scripts/gdb/linux/radixtree.py
deleted file mode 100644 (file)
index 0fdef4e..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-#
-# gdb helper commands and functions for Linux kernel debugging
-#
-#  Radix Tree Parser
-#
-# Copyright (c) 2016 Linaro Ltd
-#
-# Authors:
-#  Kieran Bingham <kieran.bingham@linaro.org>
-#
-# This work is licensed under the terms of the GNU GPL version 2.
-#
-
-import gdb
-
-from linux import utils
-from linux import constants
-
-radix_tree_root_type = utils.CachedType("struct radix_tree_root")
-radix_tree_node_type = utils.CachedType("struct radix_tree_node")
-
-
-def is_indirect_ptr(node):
-    long_type = utils.get_long_type()
-    return (node.cast(long_type) & constants.LX_RADIX_TREE_INDIRECT_PTR)
-
-
-def indirect_to_ptr(node):
-    long_type = utils.get_long_type()
-    node_type = node.type
-    indirect_ptr = node.cast(long_type) & ~constants.LX_RADIX_TREE_INDIRECT_PTR
-    return indirect_ptr.cast(node_type)
-
-
-def maxindex(height):
-    height = height & constants.LX_RADIX_TREE_HEIGHT_MASK
-    return gdb.parse_and_eval("height_to_maxindex["+str(height)+"]")
-
-
-def lookup(root, index):
-    if root.type == radix_tree_root_type.get_type().pointer():
-        root = root.dereference()
-    elif root.type != radix_tree_root_type.get_type():
-        raise gdb.GdbError("Must be struct radix_tree_root not {}"
-                           .format(root.type))
-
-    node = root['rnode']
-    if node is 0:
-        return None
-
-    if not (is_indirect_ptr(node)):
-        if (index > 0):
-            return None
-        return node
-
-    node = indirect_to_ptr(node)
-
-    height = node['path'] & constants.LX_RADIX_TREE_HEIGHT_MASK
-    if (index > maxindex(height)):
-        return None
-
-    shift = (height-1) * constants.LX_RADIX_TREE_MAP_SHIFT
-
-    while True:
-        new_index = (index >> shift) & constants.LX_RADIX_TREE_MAP_MASK
-        slot = node['slots'][new_index]
-
-        node = slot.cast(node.type.pointer()).dereference()
-        if node is 0:
-            return None
-
-        shift -= constants.LX_RADIX_TREE_MAP_SHIFT
-        height -= 1
-
-        if (height <= 0):
-            break
-
-    return node
-
-
-class LxRadixTree(gdb.Function):
-    """ Lookup and return a node from a RadixTree.
-
-$lx_radix_tree_lookup(root_node [, index]): Return the node at the given index.
-If index is omitted, the root node is dereferenced and returned."""
-
-    def __init__(self):
-        super(LxRadixTree, self).__init__("lx_radix_tree_lookup")
-
-    def invoke(self, root, index=0):
-        result = lookup(root, index)
-        if result is None:
-            raise gdb.GdbError("No entry in tree at index {}".format(index))
-
-        return result
-
-LxRadixTree()
index 9a0f8923f67ccb870224a93d8884458a1fa11a30..004b0ac7fa72d25598d26fbab4b2035e3e882305 100644 (file)
@@ -153,7 +153,7 @@ lx-symbols command."""
             saved_state['breakpoint'].enabled = saved_state['enabled']
 
     def invoke(self, arg, from_tty):
-        self.module_paths = arg.split()
+        self.module_paths = [os.path.expanduser(p) for p in arg.split()]
         self.module_paths.append(os.getcwd())
 
         # enforce update
index 3a80ad6eecad7167c3bb11fb2314f1d1c43c0a89..6e0b0afd888ade32768edba8e659b61a4f396ac0 100644 (file)
@@ -31,4 +31,3 @@ else:
     import linux.lists
     import linux.proc
     import linux.constants
-    import linux.radixtree
index 840b97328b399188edce45dc3a456917f2bf99d7..e4d90e50f6fece5db963af40a395d8ff4a4e8ecc 100644 (file)
@@ -202,5 +202,9 @@ int main(void)
        DEVID_FIELD(hda_device_id, rev_id);
        DEVID_FIELD(hda_device_id, api_version);
 
+       DEVID(fsl_mc_device_id);
+       DEVID_FIELD(fsl_mc_device_id, vendor);
+       DEVID_FIELD(fsl_mc_device_id, obj_type);
+
        return 0;
 }
index fec75786f75bbb348d03143af7d2280015394643..29d6699d5a06c1eddb52e9aa4175766297754e4c 100644 (file)
@@ -1289,6 +1289,18 @@ static int do_hda_entry(const char *filename, void *symval, char *alias)
 }
 ADD_TO_DEVTABLE("hdaudio", hda_device_id, do_hda_entry);
 
+/* Looks like: fsl-mc:vNdN */
+static int do_fsl_mc_entry(const char *filename, void *symval,
+                          char *alias)
+{
+       DEF_FIELD(symval, fsl_mc_device_id, vendor);
+       DEF_FIELD_ADDR(symval, fsl_mc_device_id, obj_type);
+
+       sprintf(alias, "fsl-mc:v%08Xd%s", vendor, *obj_type);
+       return 1;
+}
+ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry);
+
 /* Does namelen bytes of name exactly match the symbol? */
 static bool sym_is(const char *name, unsigned namelen, const char *symbol)
 {
index 2660fbcf94d1e0e8affa72d4656584bc0aa06216..7798e1608f4f4bb429c94017e2c6f73b07a7afc9 100644 (file)
@@ -500,34 +500,34 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
 {
        struct common_audit_data sa;
        struct apparmor_audit_data aad = {0,};
-       char *command, *args = value;
+       char *command, *largs = NULL, *args = value;
        size_t arg_size;
        int error;
 
        if (size == 0)
                return -EINVAL;
-       /* args points to a PAGE_SIZE buffer, AppArmor requires that
-        * the buffer must be null terminated or have size <= PAGE_SIZE -1
-        * so that AppArmor can null terminate them
-        */
-       if (args[size - 1] != '\0') {
-               if (size == PAGE_SIZE)
-                       return -EINVAL;
-               args[size] = '\0';
-       }
-
        /* task can only write its own attributes */
        if (current != task)
                return -EACCES;
 
-       args = value;
+       /* AppArmor requires that the buffer must be null terminated atm */
+       if (args[size - 1] != '\0') {
+               /* null terminate */
+               largs = args = kmalloc(size + 1, GFP_KERNEL);
+               if (!args)
+                       return -ENOMEM;
+               memcpy(args, value, size);
+               args[size] = '\0';
+       }
+
+       error = -EINVAL;
        args = strim(args);
        command = strsep(&args, " ");
        if (!args)
-               return -EINVAL;
+               goto out;
        args = skip_spaces(args);
        if (!*args)
-               return -EINVAL;
+               goto out;
 
        arg_size = size - (args - (char *) value);
        if (strcmp(name, "current") == 0) {
@@ -553,10 +553,12 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
                        goto fail;
        } else
                /* only support the "current" and "exec" process attributes */
-               return -EINVAL;
+               goto fail;
 
        if (!error)
                error = size;
+out:
+       kfree(largs);
        return error;
 
 fail:
@@ -565,9 +567,9 @@ fail:
        aad.profile = aa_current_profile();
        aad.op = OP_SETPROCATTR;
        aad.info = name;
-       aad.error = -EINVAL;
+       aad.error = error = -EINVAL;
        aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
-       return -EINVAL;
+       goto out;
 }
 
 static int apparmor_task_setrlimit(struct task_struct *task,
index a85d45595d02a265f1f8e1a4cd36c812d8cd08f0..b4fe9b00251251d596fbfbce2dd87eb44841b917 100644 (file)
@@ -160,6 +160,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
        
        if (snd_BUG_ON(!card || !id))
                return;
+       if (card->shutdown)
+               return;
        read_lock(&card->ctl_files_rwlock);
 #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
        card->mixer_oss_change_count++;
index 308c9ecf73db18415aa1157ee4e895731a5abc36..8e980aa678d0d8412e951d78f3026f25e6f6badb 100644 (file)
@@ -849,6 +849,14 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device,
 }
 EXPORT_SYMBOL(snd_pcm_new_internal);
 
+static void free_chmap(struct snd_pcm_str *pstr)
+{
+       if (pstr->chmap_kctl) {
+               snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
+               pstr->chmap_kctl = NULL;
+       }
+}
+
 static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
 {
        struct snd_pcm_substream *substream, *substream_next;
@@ -871,6 +879,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
                kfree(setup);
        }
 #endif
+       free_chmap(pstr);
        if (pstr->substream_count)
                put_device(&pstr->dev);
 }
@@ -1135,10 +1144,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
        for (cidx = 0; cidx < 2; cidx++) {
                if (!pcm->internal)
                        snd_unregister_device(&pcm->streams[cidx].dev);
-               if (pcm->streams[cidx].chmap_kctl) {
-                       snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
-                       pcm->streams[cidx].chmap_kctl = NULL;
-               }
+               free_chmap(&pcm->streams[cidx]);
        }
        mutex_unlock(&pcm->open_mutex);
        mutex_unlock(&register_mutex);
index e722022d325d7771d0d516c08ad830d814eeebdc..9a6157ea6881703310586bea76f5e1c61fcfb2a5 100644 (file)
@@ -1955,6 +1955,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
                qhead = tu->qhead++;
                tu->qhead %= tu->queue_size;
+               tu->qused--;
                spin_unlock_irq(&tu->qlock);
 
                if (tu->tread) {
@@ -1968,7 +1969,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
                }
 
                spin_lock_irq(&tu->qlock);
-               tu->qused--;
                if (err < 0)
                        goto _error;
                result += unit;
index 4a054d72011246db38fbdba68c25f3da0c0ce372..d3125c16968457436de5b659da2127157b5fbcba 100644 (file)
@@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
        int page, p, pp, delta, i;
 
        page =
-           (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
-            WT_SUBBUF_MASK)
-           >> WT_SUBBUF_SHIFT;
+           (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
+            >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
        if (dma->nr_periods >= 4)
                delta = (page - dma->period_real) & 3;
        else {
index 1cb85aeb0cea058d35e5ed4b48b1ce6f9d166ff2..286f5e3686a3e7aa7a504dbf68cfb2104bdd4bdb 100644 (file)
@@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
        u32 pipe_alloc_mask;
        int err;
 
-       commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
+       commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
        if (commpage_bak == NULL)
                return -ENOMEM;
        commpage = chip->comm_page;
-       memcpy(commpage_bak, commpage, sizeof(struct comm_page));
+       memcpy(commpage_bak, commpage, sizeof(*commpage));
 
        err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
        if (err < 0) {
index 320445f3bf736d51e3dcfc884cdf5f7aef0b3855..79c7b340acc2361518b51504b7ecb032a71991b2 100644 (file)
@@ -3977,6 +3977,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
 
        for (n = 0; n < spec->paths.used; n++) {
                path = snd_array_elem(&spec->paths, n);
+               if (!path->depth)
+                       continue;
                if (path->path[0] == nid ||
                    path->path[path->depth - 1] == nid) {
                        bool pin_old = path->pin_enabled;
index 94089fc71884a93e38ddbbefdad9a978d02430c6..6f8ea13323c1819c27dbe10f2af0293415f06135 100644 (file)
@@ -367,9 +367,10 @@ enum {
 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
 #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
+#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
 #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
-                       IS_KBL(pci) || IS_KBL_LP(pci)
+                       IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
 
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
@@ -1217,8 +1218,10 @@ static int azx_free(struct azx *chip)
        if (use_vga_switcheroo(hda)) {
                if (chip->disabled && hda->probe_continued)
                        snd_hda_unlock_devices(&chip->bus);
-               if (hda->vga_switcheroo_registered)
+               if (hda->vga_switcheroo_registered) {
                        vga_switcheroo_unregister_client(chip->pci);
+                       vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
+               }
        }
 
        if (bus->chip_init) {
@@ -2190,6 +2193,9 @@ static const struct pci_device_id azx_ids[] = {
        /* Kabylake-LP */
        { PCI_DEVICE(0x8086, 0x9d71),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
+       /* Kabylake-H */
+       { PCI_DEVICE(0x8086, 0xa2f0),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
        /* Broxton-P(Apollolake) */
        { PCI_DEVICE(0x8086, 0x5a98),
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
@@ -2263,6 +2269,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x157a),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0x15b3),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x793b),
          .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(0x1002, 0x7919),
index 900bfbc3368c3ffc70f74a4b7b2ed41cbf550290..abcb5a6a1cd9547db9c83e1cada1456615d47db8 100644 (file)
@@ -5651,6 +5651,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
        SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
+       SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -5737,7 +5738,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {}
 };
 #define ALC225_STANDARD_PINS \
-       {0x12, 0xb7a60130}, \
        {0x21, 0x04211020}
 
 #define ALC256_STANDARD_PINS \
@@ -5762,10 +5762,24 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60130},
                {0x14, 0x901701a0}),
        SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60130},
                {0x14, 0x901701b0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60150},
+               {0x14, 0x901701a0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60150},
+               {0x14, 0x901701b0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x12, 0xb7a60130},
+               {0x1b, 0x90170110}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x02211020}),
index 4d82a58ff6b0b9c43c9f6f892ecbd6ab79e44de8..f3fb98f0a995bb41dd473ccc4443164e404c9883 100644 (file)
@@ -483,9 +483,10 @@ config SND_SOC_DMIC
        tristate
 
 config SND_SOC_HDMI_CODEC
-       tristate
-       select SND_PCM_ELD
-       select SND_PCM_IEC958
+       tristate
+       select SND_PCM_ELD
+       select SND_PCM_IEC958
+       select HDMI
 
 config SND_SOC_ES8328
        tristate "Everest Semi ES8328 CODEC"
index 647f69de6baac94c3d6aef8a9ff297894ec0c47b..5013d2ba0c10a968045d83e5e5cef6e223e55436 100644 (file)
@@ -146,6 +146,7 @@ static const struct regmap_config ak4613_regmap_cfg = {
        .max_register           = 0x16,
        .reg_defaults           = ak4613_reg,
        .num_reg_defaults       = ARRAY_SIZE(ak4613_reg),
+       .cache_type             = REGCACHE_RBTREE,
 };
 
 static const struct of_device_id ak4613_of_match[] = {
@@ -530,7 +531,6 @@ static int ak4613_i2c_remove(struct i2c_client *client)
 static struct i2c_driver ak4613_i2c_driver = {
        .driver = {
                .name = "ak4613-codec",
-               .owner = THIS_MODULE,
                .of_match_table = ak4613_of_match,
        },
        .probe          = ak4613_i2c_probe,
index d6f4abbbf8a7fd669237d667a03825c13c3603ed..fb3885fe0afb75a872a8d08f7d6fb931555e2446 100644 (file)
@@ -226,6 +226,7 @@ static int v253_open(struct tty_struct *tty)
        if (!tty->disc_data)
                return -ENODEV;
 
+       tty->receive_room = 16;
        if (tty->ops->write(tty, v253_init, len) != len) {
                ret = -EIO;
                goto err;
index 181cd3bf0b926c750257eee7351558405e2a7e30..2abb742fc47b53f19a3a2024fd6424a86c63c631 100644 (file)
@@ -1474,6 +1474,11 @@ static int hdmi_codec_probe(struct snd_soc_codec *codec)
         * exit, we call pm_runtime_suspend() so that will do for us
         */
        hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+       if (!hlink) {
+               dev_err(&edev->hdac.dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_get(edev->ebus, hlink);
 
        ret = create_fill_widget_route_map(dapm);
@@ -1634,6 +1639,11 @@ static int hdac_hdmi_dev_probe(struct hdac_ext_device *edev)
 
        /* hold the ref while we probe */
        hlink = snd_hdac_ext_bus_get_link(edev->ebus, dev_name(&edev->hdac.dev));
+       if (!hlink) {
+               dev_err(&edev->hdac.dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_get(edev->ebus, hlink);
 
        hdmi_priv = devm_kzalloc(&codec->dev, sizeof(*hdmi_priv), GFP_KERNEL);
@@ -1744,6 +1754,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
        }
 
        hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+       if (!hlink) {
+               dev_err(dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_put(ebus, hlink);
 
        return 0;
@@ -1765,6 +1780,11 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
                return 0;
 
        hlink = snd_hdac_ext_bus_get_link(ebus, dev_name(dev));
+       if (!hlink) {
+               dev_err(dev, "hdac link not found\n");
+               return -EIO;
+       }
+
        snd_hdac_ext_bus_link_get(ebus, hlink);
 
        err = snd_hdac_display_power(bus, true);
index 3c6594da6c9c95c8abb198706cac920bae3959da..d70847c9eeb03d8aeba2c316eadbaf68aed237bc 100644 (file)
@@ -253,7 +253,7 @@ static const struct reg_default rt5650_reg[] = {
        { 0x2b, 0x5454 },
        { 0x2c, 0xaaa0 },
        { 0x2d, 0x0000 },
-       { 0x2f, 0x1002 },
+       { 0x2f, 0x5002 },
        { 0x31, 0x5000 },
        { 0x32, 0x0000 },
        { 0x33, 0x0000 },
index 49a9e7049e2ba1457621cd549ca3722cce0a0688..0af5ddbef1daaa0b4fa7dbfc89c72c52b6a84bfd 100644 (file)
@@ -619,7 +619,7 @@ static const struct snd_kcontrol_new rt5670_snd_controls[] = {
                RT5670_L_MUTE_SFT, RT5670_R_MUTE_SFT, 1, 1),
        SOC_DOUBLE_TLV("HP Playback Volume", RT5670_HP_VOL,
                RT5670_L_VOL_SFT, RT5670_R_VOL_SFT,
-               39, 0, out_vol_tlv),
+               39, 1, out_vol_tlv),
        /* OUTPUT Control */
        SOC_DOUBLE("OUT Channel Switch", RT5670_LOUT1,
                RT5670_VOL_L_SFT, RT5670_VOL_R_SFT, 1, 1),
index da60e3fe5ee7afb37740b750e0f15f6156d6a4eb..e7fe6b7b95b7fc8e29b3da06f770c9d52f8bd155 100644 (file)
@@ -1872,7 +1872,7 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
                .capture = {
                        .stream_name = "Audio Trace CPU",
                        .channels_min = 1,
-                       .channels_max = 6,
+                       .channels_max = 4,
                        .rates = WM5102_RATES,
                        .formats = WM5102_FORMATS,
                },
index b5820e4d547170a4a02ce0a46b09a3712ea42ce1..d54f1b46c9ec08107427ff0f6bf81bf4670103bb 100644 (file)
@@ -1723,6 +1723,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
        { "OUT2L", NULL, "SYSCLK" },
        { "OUT2R", NULL, "SYSCLK" },
        { "OUT3L", NULL, "SYSCLK" },
+       { "OUT3R", NULL, "SYSCLK" },
        { "OUT4L", NULL, "SYSCLK" },
        { "OUT4R", NULL, "SYSCLK" },
        { "OUT5L", NULL, "SYSCLK" },
index f6f9395ea38ef88b40b31e70dafddf27ea6f4945..1c600819f7689b451ae4da81922649961de47c08 100644 (file)
@@ -743,6 +743,7 @@ static const struct regmap_config wm8940_regmap = {
        .max_register = WM8940_MONOMIX,
        .reg_defaults = wm8940_reg_defaults,
        .num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults),
+       .cache_type = REGCACHE_RBTREE,
 
        .readable_reg = wm8940_readable_register,
        .volatile_reg = wm8940_volatile_register,
index 0f66fda2c7727c3c0821c97228054234b7fcbdba..237dc67002efbefd600f96bb76997d21890a8b9f 100644 (file)
@@ -1513,8 +1513,9 @@ static struct davinci_mcasp_pdata am33xx_mcasp_pdata = {
 };
 
 static struct davinci_mcasp_pdata dra7_mcasp_pdata = {
-       .tx_dma_offset = 0x200,
-       .rx_dma_offset = 0x284,
+       /* The CFG port offset will be calculated if it is needed */
+       .tx_dma_offset = 0,
+       .rx_dma_offset = 0,
        .version = MCASP_VERSION_4,
 };
 
@@ -1734,6 +1735,52 @@ static int davinci_mcasp_get_dma_type(struct davinci_mcasp *mcasp)
        return PCM_EDMA;
 }
 
+static u32 davinci_mcasp_txdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+       int i;
+       u32 offset = 0;
+
+       if (pdata->version != MCASP_VERSION_4)
+               return pdata->tx_dma_offset;
+
+       for (i = 0; i < pdata->num_serializer; i++) {
+               if (pdata->serial_dir[i] == TX_MODE) {
+                       if (!offset) {
+                               offset = DAVINCI_MCASP_TXBUF_REG(i);
+                       } else {
+                               pr_err("%s: Only one serializer allowed!\n",
+                                      __func__);
+                               break;
+                       }
+               }
+       }
+
+       return offset;
+}
+
+static u32 davinci_mcasp_rxdma_offset(struct davinci_mcasp_pdata *pdata)
+{
+       int i;
+       u32 offset = 0;
+
+       if (pdata->version != MCASP_VERSION_4)
+               return pdata->rx_dma_offset;
+
+       for (i = 0; i < pdata->num_serializer; i++) {
+               if (pdata->serial_dir[i] == RX_MODE) {
+                       if (!offset) {
+                               offset = DAVINCI_MCASP_RXBUF_REG(i);
+                       } else {
+                               pr_err("%s: Only one serializer allowed!\n",
+                                      __func__);
+                               break;
+                       }
+               }
+       }
+
+       return offset;
+}
+
 static int davinci_mcasp_probe(struct platform_device *pdev)
 {
        struct snd_dmaengine_dai_dma_data *dma_data;
@@ -1862,7 +1909,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
        if (dat)
                dma_data->addr = dat->start;
        else
-               dma_data->addr = mem->start + pdata->tx_dma_offset;
+               dma_data->addr = mem->start + davinci_mcasp_txdma_offset(pdata);
 
        dma = &mcasp->dma_request[SNDRV_PCM_STREAM_PLAYBACK];
        res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -1883,7 +1930,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
                if (dat)
                        dma_data->addr = dat->start;
                else
-                       dma_data->addr = mem->start + pdata->rx_dma_offset;
+                       dma_data->addr =
+                               mem->start + davinci_mcasp_rxdma_offset(pdata);
 
                dma = &mcasp->dma_request[SNDRV_PCM_STREAM_CAPTURE];
                res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
index 1e8787fb3fb766bf386e51b629e2634294712a85..afddc8010c5415966d52aa549c2b7115b916d1f0 100644 (file)
@@ -85,9 +85,9 @@
                                                (n << 2))
 
 /* Transmit Buffer for Serializer n */
-#define DAVINCI_MCASP_TXBUF_REG                0x200
+#define DAVINCI_MCASP_TXBUF_REG(n)     (0x200 + (n << 2))
 /* Receive Buffer for Serializer n */
-#define DAVINCI_MCASP_RXBUF_REG                0x280
+#define DAVINCI_MCASP_RXBUF_REG(n)     (0x280 + (n << 2))
 
 /* McASP FIFO Registers */
 #define DAVINCI_MCASP_V2_AFIFO_BASE    (0x1010)
index 632ecc0e39562aab90a880e0c8b678a4ccea1078..bedec4a325813f92d752b16833ccfb0ba31b2db7 100644 (file)
@@ -952,16 +952,16 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
        ssi_private->i2s_mode = CCSR_SSI_SCR_NET;
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
+               regmap_update_bits(regs, CCSR_SSI_STCCR,
+                                  CCSR_SSI_SxCCR_DC_MASK,
+                                  CCSR_SSI_SxCCR_DC(2));
+               regmap_update_bits(regs, CCSR_SSI_SRCCR,
+                                  CCSR_SSI_SxCCR_DC_MASK,
+                                  CCSR_SSI_SxCCR_DC(2));
                switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
                case SND_SOC_DAIFMT_CBM_CFS:
                case SND_SOC_DAIFMT_CBS_CFS:
                        ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_MASTER;
-                       regmap_update_bits(regs, CCSR_SSI_STCCR,
-                                       CCSR_SSI_SxCCR_DC_MASK,
-                                       CCSR_SSI_SxCCR_DC(2));
-                       regmap_update_bits(regs, CCSR_SSI_SRCCR,
-                                       CCSR_SSI_SxCCR_DC_MASK,
-                                       CCSR_SSI_SxCCR_DC(2));
                        break;
                case SND_SOC_DAIFMT_CBM_CFM:
                        ssi_private->i2s_mode |= CCSR_SSI_SCR_I2S_MODE_SLAVE;
index 395168986462d2ef97659281ec1bf088c806b5fc..1bead81bb5106b590259780d4bcdef86dba5fc03 100644 (file)
@@ -182,24 +182,29 @@ static int sst_platform_compr_trigger(struct snd_compr_stream *cstream, int cmd)
        case SNDRV_PCM_TRIGGER_START:
                if (stream->compr_ops->stream_start)
                        return stream->compr_ops->stream_start(sst->dev, stream->id);
+               break;
        case SNDRV_PCM_TRIGGER_STOP:
                if (stream->compr_ops->stream_drop)
                        return stream->compr_ops->stream_drop(sst->dev, stream->id);
+               break;
        case SND_COMPR_TRIGGER_DRAIN:
                if (stream->compr_ops->stream_drain)
                        return stream->compr_ops->stream_drain(sst->dev, stream->id);
+               break;
        case SND_COMPR_TRIGGER_PARTIAL_DRAIN:
                if (stream->compr_ops->stream_partial_drain)
                        return stream->compr_ops->stream_partial_drain(sst->dev, stream->id);
+               break;
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                if (stream->compr_ops->stream_pause)
                        return stream->compr_ops->stream_pause(sst->dev, stream->id);
+               break;
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                if (stream->compr_ops->stream_pause_release)
                        return stream->compr_ops->stream_pause_release(sst->dev, stream->id);
-       default:
-               return -EINVAL;
+               break;
        }
+       return -EINVAL;
 }
 
 static int sst_platform_compr_pointer(struct snd_compr_stream *cstream,
index 965ce40ce7520a37520b116d76ba1885aaaaeb7c..8b95e09e23e8bd3fe9ddd886538b9f07d8a4c07d 100644 (file)
@@ -291,6 +291,7 @@ int bxt_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
        sst_dsp_mailbox_init(sst, (BXT_ADSP_SRAM0_BASE + SKL_ADSP_W0_STAT_SZ),
                        SKL_ADSP_W0_UP_SZ, BXT_ADSP_SRAM1_BASE, SKL_ADSP_W1_SZ);
 
+       INIT_LIST_HEAD(&sst->module_list);
        ret = skl_ipc_init(dev, skl);
        if (ret)
                return ret;
index 49354d17ea553795e45f7f04e4603429dbca70f1..c4c51a4d3c8fd7fdbeec39defae6745500726e1a 100644 (file)
@@ -518,7 +518,7 @@ static void rsnd_adg_get_clkout(struct rsnd_priv *priv,
                }
        }
 
-       rsnd_mod_bset(adg_mod, SSICKR, 0x00FF0000, ckr);
+       rsnd_mod_bset(adg_mod, SSICKR, 0x80FF0000, ckr);
        rsnd_mod_write(adg_mod, BRRA,  rbga);
        rsnd_mod_write(adg_mod, BRRB,  rbgb);
 
index 69860da473ea8b9d7ee853e9dfcdcc804690983f..9e5276d6dda05c999fcba24ff35ab7345513c6da 100644 (file)
@@ -556,7 +556,6 @@ static int usb_audio_probe(struct usb_interface *intf,
                                goto __error;
                        }
                        chip = usb_chip[i];
-                       dev_set_drvdata(&dev->dev, chip);
                        atomic_inc(&chip->active); /* avoid autopm */
                        break;
                }
@@ -582,6 +581,7 @@ static int usb_audio_probe(struct usb_interface *intf,
                        goto __error;
                }
        }
+       dev_set_drvdata(&dev->dev, chip);
 
        /*
         * For devices with more than one control interface, we assume the
diff --git a/tools/arch/alpha/include/uapi/asm/bitsperlong.h b/tools/arch/alpha/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..ad57f78
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef __ASM_ALPHA_BITSPERLONG_H
+#define __ASM_ALPHA_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_ALPHA_BITSPERLONG_H */
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..a2b3eb3
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_H__
+#define __ARM_KVM_H__
+
+#include <linux/types.h>
+#include <linux/psci.h>
+#include <asm/ptrace.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_REG_SIZE(id)                                               \
+       (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
+#define KVM_ARM_SVC_sp         svc_regs[0]
+#define KVM_ARM_SVC_lr         svc_regs[1]
+#define KVM_ARM_SVC_spsr       svc_regs[2]
+#define KVM_ARM_ABT_sp         abt_regs[0]
+#define KVM_ARM_ABT_lr         abt_regs[1]
+#define KVM_ARM_ABT_spsr       abt_regs[2]
+#define KVM_ARM_UND_sp         und_regs[0]
+#define KVM_ARM_UND_lr         und_regs[1]
+#define KVM_ARM_UND_spsr       und_regs[2]
+#define KVM_ARM_IRQ_sp         irq_regs[0]
+#define KVM_ARM_IRQ_lr         irq_regs[1]
+#define KVM_ARM_IRQ_spsr       irq_regs[2]
+
+/* Valid only for fiq_regs in struct kvm_regs */
+#define KVM_ARM_FIQ_r8         fiq_regs[0]
+#define KVM_ARM_FIQ_r9         fiq_regs[1]
+#define KVM_ARM_FIQ_r10                fiq_regs[2]
+#define KVM_ARM_FIQ_fp         fiq_regs[3]
+#define KVM_ARM_FIQ_ip         fiq_regs[4]
+#define KVM_ARM_FIQ_sp         fiq_regs[5]
+#define KVM_ARM_FIQ_lr         fiq_regs[6]
+#define KVM_ARM_FIQ_spsr       fiq_regs[7]
+
+struct kvm_regs {
+       struct pt_regs usr_regs;        /* R0_usr - R14_usr, PC, CPSR */
+       unsigned long svc_regs[3];      /* SP_svc, LR_svc, SPSR_svc */
+       unsigned long abt_regs[3];      /* SP_abt, LR_abt, SPSR_abt */
+       unsigned long und_regs[3];      /* SP_und, LR_und, SPSR_und */
+       unsigned long irq_regs[3];      /* SP_irq, LR_irq, SPSR_irq */
+       unsigned long fiq_regs[8];      /* R8_fiq - R14_fiq, SPSR_fiq */
+};
+
+/* Supported Processor Types */
+#define KVM_ARM_TARGET_CORTEX_A15      0
+#define KVM_ARM_TARGET_CORTEX_A7       1
+#define KVM_ARM_NUM_TARGETS            2
+
+/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
+#define KVM_ARM_DEVICE_TYPE_SHIFT      0
+#define KVM_ARM_DEVICE_TYPE_MASK       (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_ID_SHIFT                16
+#define KVM_ARM_DEVICE_ID_MASK         (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+
+/* Supported device IDs */
+#define KVM_ARM_DEVICE_VGIC_V2         0
+
+/* Supported VGIC address types  */
+#define KVM_VGIC_V2_ADDR_TYPE_DIST     0
+#define KVM_VGIC_V2_ADDR_TYPE_CPU      1
+
+#define KVM_VGIC_V2_DIST_SIZE          0x1000
+#define KVM_VGIC_V2_CPU_SIZE           0x2000
+
+#define KVM_ARM_VCPU_POWER_OFF         0 /* CPU is started in OFF state */
+#define KVM_ARM_VCPU_PSCI_0_2          1 /* CPU uses PSCI v0.2 */
+
+struct kvm_vcpu_init {
+       __u32 target;
+       __u32 features[7];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+struct kvm_sync_regs {
+};
+
+struct kvm_arch_memory_slot {
+};
+
+/* If you need to interpret the index values, here is the key: */
+#define KVM_REG_ARM_COPROC_MASK                0x000000000FFF0000
+#define KVM_REG_ARM_COPROC_SHIFT       16
+#define KVM_REG_ARM_32_OPC2_MASK       0x0000000000000007
+#define KVM_REG_ARM_32_OPC2_SHIFT      0
+#define KVM_REG_ARM_OPC1_MASK          0x0000000000000078
+#define KVM_REG_ARM_OPC1_SHIFT         3
+#define KVM_REG_ARM_CRM_MASK           0x0000000000000780
+#define KVM_REG_ARM_CRM_SHIFT          7
+#define KVM_REG_ARM_32_CRN_MASK                0x0000000000007800
+#define KVM_REG_ARM_32_CRN_SHIFT       11
+
+#define ARM_CP15_REG_SHIFT_MASK(x,n) \
+       (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
+
+#define __ARM_CP15_REG(op1,crn,crm,op2) \
+       (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
+       ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
+       ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
+       ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
+       ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
+
+#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
+
+#define __ARM_CP15_REG64(op1,crm) \
+       (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
+#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
+
+#define KVM_REG_ARM_TIMER_CTL          ARM_CP15_REG32(0, 14, 3, 1)
+#define KVM_REG_ARM_TIMER_CNT          ARM_CP15_REG64(1, 14)
+#define KVM_REG_ARM_TIMER_CVAL         ARM_CP15_REG64(3, 14)
+
+/* Normal registers are mapped as coprocessor 16. */
+#define KVM_REG_ARM_CORE               (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_CORE_REG(name)     (offsetof(struct kvm_regs, name) / 4)
+
+/* Some registers need more space to represent values. */
+#define KVM_REG_ARM_DEMUX              (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_DEMUX_ID_MASK      0x000000000000FF00
+#define KVM_REG_ARM_DEMUX_ID_SHIFT     8
+#define KVM_REG_ARM_DEMUX_ID_CCSIDR    (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
+#define KVM_REG_ARM_DEMUX_VAL_MASK     0x00000000000000FF
+#define KVM_REG_ARM_DEMUX_VAL_SHIFT    0
+
+/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
+#define KVM_REG_ARM_VFP                        (0x0012 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_VFP_MASK           0x000000000000FFFF
+#define KVM_REG_ARM_VFP_BASE_REG       0x0
+#define KVM_REG_ARM_VFP_FPSID          0x1000
+#define KVM_REG_ARM_VFP_FPSCR          0x1001
+#define KVM_REG_ARM_VFP_MVFR1          0x1006
+#define KVM_REG_ARM_VFP_MVFR0          0x1007
+#define KVM_REG_ARM_VFP_FPEXC          0x1008
+#define KVM_REG_ARM_VFP_FPINST         0x1009
+#define KVM_REG_ARM_VFP_FPINST2                0x100A
+
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR      0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS  2
+#define   KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
+#define   KVM_DEV_ARM_VGIC_CPUID_MASK  (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT        0
+#define   KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS   3
+#define KVM_DEV_ARM_VGIC_GRP_CTRL       4
+#define   KVM_DEV_ARM_VGIC_CTRL_INIT    0
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_TYPE_SHIFT         24
+#define KVM_ARM_IRQ_TYPE_MASK          0xff
+#define KVM_ARM_IRQ_VCPU_SHIFT         16
+#define KVM_ARM_IRQ_VCPU_MASK          0xff
+#define KVM_ARM_IRQ_NUM_SHIFT          0
+#define KVM_ARM_IRQ_NUM_MASK           0xffff
+
+/* irq_type field */
+#define KVM_ARM_IRQ_TYPE_CPU           0
+#define KVM_ARM_IRQ_TYPE_SPI           1
+#define KVM_ARM_IRQ_TYPE_PPI           2
+
+/* out-of-kernel GIC cpu interrupt injection irq_number field */
+#define KVM_ARM_IRQ_CPU_IRQ            0
+#define KVM_ARM_IRQ_CPU_FIQ            1
+
+/*
+ * This used to hold the highest supported SPI, but it is now obsolete
+ * and only here to provide source code level compatibility with older
+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
+ */
+#ifndef __KERNEL__
+#define KVM_ARM_IRQ_GIC_MAX            127
+#endif
+
+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS          1
+
+/* PSCI interface */
+#define KVM_PSCI_FN_BASE               0x95c1ba5e
+#define KVM_PSCI_FN(n)                 (KVM_PSCI_FN_BASE + (n))
+
+#define KVM_PSCI_FN_CPU_SUSPEND                KVM_PSCI_FN(0)
+#define KVM_PSCI_FN_CPU_OFF            KVM_PSCI_FN(1)
+#define KVM_PSCI_FN_CPU_ON             KVM_PSCI_FN(2)
+#define KVM_PSCI_FN_MIGRATE            KVM_PSCI_FN(3)
+
+#define KVM_PSCI_RET_SUCCESS           PSCI_RET_SUCCESS
+#define KVM_PSCI_RET_NI                        PSCI_RET_NOT_SUPPORTED
+#define KVM_PSCI_RET_INVAL             PSCI_RET_INVALID_PARAMS
+#define KVM_PSCI_RET_DENIED            PSCI_RET_DENIED
+
+#endif /* __ARM_KVM_H__ */
diff --git a/tools/arch/arm/include/uapi/asm/perf_regs.h b/tools/arch/arm/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..ce59448
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _ASM_ARM_PERF_REGS_H
+#define _ASM_ARM_PERF_REGS_H
+
+enum perf_event_arm_regs {
+       PERF_REG_ARM_R0,
+       PERF_REG_ARM_R1,
+       PERF_REG_ARM_R2,
+       PERF_REG_ARM_R3,
+       PERF_REG_ARM_R4,
+       PERF_REG_ARM_R5,
+       PERF_REG_ARM_R6,
+       PERF_REG_ARM_R7,
+       PERF_REG_ARM_R8,
+       PERF_REG_ARM_R9,
+       PERF_REG_ARM_R10,
+       PERF_REG_ARM_FP,
+       PERF_REG_ARM_IP,
+       PERF_REG_ARM_SP,
+       PERF_REG_ARM_LR,
+       PERF_REG_ARM_PC,
+       PERF_REG_ARM_MAX,
+};
+#endif /* _ASM_ARM_PERF_REGS_H */
diff --git a/tools/arch/arm64/include/uapi/asm/bitsperlong.h b/tools/arch/arm64/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..fce9c29
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_BITSPERLONG_H
+#define __ASM_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_BITSPERLONG_H */
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..f209ea1
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/uapi/asm/kvm.h:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_H__
+#define __ARM_KVM_H__
+
+#define KVM_SPSR_EL1   0
+#define KVM_SPSR_SVC   KVM_SPSR_EL1
+#define KVM_SPSR_ABT   1
+#define KVM_SPSR_UND   2
+#define KVM_SPSR_IRQ   3
+#define KVM_SPSR_FIQ   4
+#define KVM_NR_SPSR    5
+
+#ifndef __ASSEMBLY__
+#include <linux/psci.h>
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_REG_SIZE(id)                                               \
+       (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+struct kvm_regs {
+       struct user_pt_regs regs;       /* sp = sp_el0 */
+
+       __u64   sp_el1;
+       __u64   elr_el1;
+
+       __u64   spsr[KVM_NR_SPSR];
+
+       struct user_fpsimd_state fp_regs;
+};
+
+/*
+ * Supported CPU Targets - Adding a new target type is not recommended,
+ * unless there are some special registers not supported by the
+ * genericv8 syreg table.
+ */
+#define KVM_ARM_TARGET_AEM_V8          0
+#define KVM_ARM_TARGET_FOUNDATION_V8   1
+#define KVM_ARM_TARGET_CORTEX_A57      2
+#define KVM_ARM_TARGET_XGENE_POTENZA   3
+#define KVM_ARM_TARGET_CORTEX_A53      4
+/* Generic ARM v8 target */
+#define KVM_ARM_TARGET_GENERIC_V8      5
+
+#define KVM_ARM_NUM_TARGETS            6
+
+/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
+#define KVM_ARM_DEVICE_TYPE_SHIFT      0
+#define KVM_ARM_DEVICE_TYPE_MASK       (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_ID_SHIFT                16
+#define KVM_ARM_DEVICE_ID_MASK         (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+
+/* Supported device IDs */
+#define KVM_ARM_DEVICE_VGIC_V2         0
+
+/* Supported VGIC address types  */
+#define KVM_VGIC_V2_ADDR_TYPE_DIST     0
+#define KVM_VGIC_V2_ADDR_TYPE_CPU      1
+
+#define KVM_VGIC_V2_DIST_SIZE          0x1000
+#define KVM_VGIC_V2_CPU_SIZE           0x2000
+
+/* Supported VGICv3 address types  */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST     2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST   3
+
+#define KVM_VGIC_V3_DIST_SIZE          SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE                (2 * SZ_64K)
+
+#define KVM_ARM_VCPU_POWER_OFF         0 /* CPU is started in OFF state */
+#define KVM_ARM_VCPU_EL1_32BIT         1 /* CPU running a 32bit VM */
+#define KVM_ARM_VCPU_PSCI_0_2          2 /* CPU uses PSCI v0.2 */
+#define KVM_ARM_VCPU_PMU_V3            3 /* Support guest PMUv3 */
+
+struct kvm_vcpu_init {
+       __u32 target;
+       __u32 features[7];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+/*
+ * See v8 ARM ARM D7.3: Debug Registers
+ *
+ * The architectural limit is 16 debug registers of each type although
+ * in practice there are usually less (see ID_AA64DFR0_EL1).
+ *
+ * Although the control registers are architecturally defined as 32
+ * bits wide we use a 64 bit structure here to keep parity with
+ * KVM_GET/SET_ONE_REG behaviour which treats all system registers as
+ * 64 bit values. It also allows for the possibility of the
+ * architecture expanding the control registers without having to
+ * change the userspace ABI.
+ */
+#define KVM_ARM_MAX_DBG_REGS 16
+struct kvm_guest_debug_arch {
+       __u64 dbg_bcr[KVM_ARM_MAX_DBG_REGS];
+       __u64 dbg_bvr[KVM_ARM_MAX_DBG_REGS];
+       __u64 dbg_wcr[KVM_ARM_MAX_DBG_REGS];
+       __u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
+};
+
+struct kvm_debug_exit_arch {
+       __u32 hsr;
+       __u64 far;      /* used for watchpoints */
+};
+
+/*
+ * Architecture specific defines for kvm_guest_debug->control
+ */
+
+#define KVM_GUESTDBG_USE_SW_BP         (1 << 16)
+#define KVM_GUESTDBG_USE_HW            (1 << 17)
+
+struct kvm_sync_regs {
+};
+
+struct kvm_arch_memory_slot {
+};
+
+/* If you need to interpret the index values, here is the key: */
+#define KVM_REG_ARM_COPROC_MASK                0x000000000FFF0000
+#define KVM_REG_ARM_COPROC_SHIFT       16
+
+/* Normal registers are mapped as coprocessor 16. */
+#define KVM_REG_ARM_CORE               (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_CORE_REG(name)     (offsetof(struct kvm_regs, name) / sizeof(__u32))
+
+/* Some registers need more space to represent values. */
+#define KVM_REG_ARM_DEMUX              (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_DEMUX_ID_MASK      0x000000000000FF00
+#define KVM_REG_ARM_DEMUX_ID_SHIFT     8
+#define KVM_REG_ARM_DEMUX_ID_CCSIDR    (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
+#define KVM_REG_ARM_DEMUX_VAL_MASK     0x00000000000000FF
+#define KVM_REG_ARM_DEMUX_VAL_SHIFT    0
+
+/* AArch64 system registers */
+#define KVM_REG_ARM64_SYSREG           (0x0013 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM64_SYSREG_OP0_MASK  0x000000000000c000
+#define KVM_REG_ARM64_SYSREG_OP0_SHIFT 14
+#define KVM_REG_ARM64_SYSREG_OP1_MASK  0x0000000000003800
+#define KVM_REG_ARM64_SYSREG_OP1_SHIFT 11
+#define KVM_REG_ARM64_SYSREG_CRN_MASK  0x0000000000000780
+#define KVM_REG_ARM64_SYSREG_CRN_SHIFT 7
+#define KVM_REG_ARM64_SYSREG_CRM_MASK  0x0000000000000078
+#define KVM_REG_ARM64_SYSREG_CRM_SHIFT 3
+#define KVM_REG_ARM64_SYSREG_OP2_MASK  0x0000000000000007
+#define KVM_REG_ARM64_SYSREG_OP2_SHIFT 0
+
+#define ARM64_SYS_REG_SHIFT_MASK(x,n) \
+       (((x) << KVM_REG_ARM64_SYSREG_ ## n ## _SHIFT) & \
+       KVM_REG_ARM64_SYSREG_ ## n ## _MASK)
+
+#define __ARM64_SYS_REG(op0,op1,crn,crm,op2) \
+       (KVM_REG_ARM64 | KVM_REG_ARM64_SYSREG | \
+       ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \
+       ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \
+       ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \
+       ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \
+       ARM64_SYS_REG_SHIFT_MASK(op2, OP2))
+
+#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
+
+#define KVM_REG_ARM_TIMER_CTL          ARM64_SYS_REG(3, 3, 14, 3, 1)
+#define KVM_REG_ARM_TIMER_CNT          ARM64_SYS_REG(3, 3, 14, 3, 2)
+#define KVM_REG_ARM_TIMER_CVAL         ARM64_SYS_REG(3, 3, 14, 0, 2)
+
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR      0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS  2
+#define   KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
+#define   KVM_DEV_ARM_VGIC_CPUID_MASK  (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT        0
+#define   KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS   3
+#define KVM_DEV_ARM_VGIC_GRP_CTRL      4
+#define   KVM_DEV_ARM_VGIC_CTRL_INIT   0
+
+/* Device Control API on vcpu fd */
+#define KVM_ARM_VCPU_PMU_V3_CTRL       0
+#define   KVM_ARM_VCPU_PMU_V3_IRQ      0
+#define   KVM_ARM_VCPU_PMU_V3_INIT     1
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_TYPE_SHIFT         24
+#define KVM_ARM_IRQ_TYPE_MASK          0xff
+#define KVM_ARM_IRQ_VCPU_SHIFT         16
+#define KVM_ARM_IRQ_VCPU_MASK          0xff
+#define KVM_ARM_IRQ_NUM_SHIFT          0
+#define KVM_ARM_IRQ_NUM_MASK           0xffff
+
+/* irq_type field */
+#define KVM_ARM_IRQ_TYPE_CPU           0
+#define KVM_ARM_IRQ_TYPE_SPI           1
+#define KVM_ARM_IRQ_TYPE_PPI           2
+
+/* out-of-kernel GIC cpu interrupt injection irq_number field */
+#define KVM_ARM_IRQ_CPU_IRQ            0
+#define KVM_ARM_IRQ_CPU_FIQ            1
+
+/*
+ * This used to hold the highest supported SPI, but it is now obsolete
+ * and only here to provide source code level compatibility with older
+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
+ */
+#ifndef __KERNEL__
+#define KVM_ARM_IRQ_GIC_MAX            127
+#endif
+
+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS          1
+
+/* PSCI interface */
+#define KVM_PSCI_FN_BASE               0x95c1ba5e
+#define KVM_PSCI_FN(n)                 (KVM_PSCI_FN_BASE + (n))
+
+#define KVM_PSCI_FN_CPU_SUSPEND                KVM_PSCI_FN(0)
+#define KVM_PSCI_FN_CPU_OFF            KVM_PSCI_FN(1)
+#define KVM_PSCI_FN_CPU_ON             KVM_PSCI_FN(2)
+#define KVM_PSCI_FN_MIGRATE            KVM_PSCI_FN(3)
+
+#define KVM_PSCI_RET_SUCCESS           PSCI_RET_SUCCESS
+#define KVM_PSCI_RET_NI                        PSCI_RET_NOT_SUPPORTED
+#define KVM_PSCI_RET_INVAL             PSCI_RET_INVALID_PARAMS
+#define KVM_PSCI_RET_DENIED            PSCI_RET_DENIED
+
+#endif
+
+#endif /* __ARM_KVM_H__ */
diff --git a/tools/arch/arm64/include/uapi/asm/perf_regs.h b/tools/arch/arm64/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..172b831
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef _ASM_ARM64_PERF_REGS_H
+#define _ASM_ARM64_PERF_REGS_H
+
+enum perf_event_arm_regs {
+       PERF_REG_ARM64_X0,
+       PERF_REG_ARM64_X1,
+       PERF_REG_ARM64_X2,
+       PERF_REG_ARM64_X3,
+       PERF_REG_ARM64_X4,
+       PERF_REG_ARM64_X5,
+       PERF_REG_ARM64_X6,
+       PERF_REG_ARM64_X7,
+       PERF_REG_ARM64_X8,
+       PERF_REG_ARM64_X9,
+       PERF_REG_ARM64_X10,
+       PERF_REG_ARM64_X11,
+       PERF_REG_ARM64_X12,
+       PERF_REG_ARM64_X13,
+       PERF_REG_ARM64_X14,
+       PERF_REG_ARM64_X15,
+       PERF_REG_ARM64_X16,
+       PERF_REG_ARM64_X17,
+       PERF_REG_ARM64_X18,
+       PERF_REG_ARM64_X19,
+       PERF_REG_ARM64_X20,
+       PERF_REG_ARM64_X21,
+       PERF_REG_ARM64_X22,
+       PERF_REG_ARM64_X23,
+       PERF_REG_ARM64_X24,
+       PERF_REG_ARM64_X25,
+       PERF_REG_ARM64_X26,
+       PERF_REG_ARM64_X27,
+       PERF_REG_ARM64_X28,
+       PERF_REG_ARM64_X29,
+       PERF_REG_ARM64_LR,
+       PERF_REG_ARM64_SP,
+       PERF_REG_ARM64_PC,
+       PERF_REG_ARM64_MAX,
+};
+#endif /* _ASM_ARM64_PERF_REGS_H */
diff --git a/tools/arch/frv/include/uapi/asm/bitsperlong.h b/tools/arch/frv/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..6dc0bb0
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
diff --git a/tools/arch/h8300/include/asm/bitsperlong.h b/tools/arch/h8300/include/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..e140e46
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __ASM_H8300_BITS_PER_LONG
+#define __ASM_H8300_BITS_PER_LONG
+
+#include <asm-generic/bitsperlong.h>
+
+#if !defined(__ASSEMBLY__)
+/* h8300-unknown-linux required long */
+#define __kernel_size_t __kernel_size_t
+typedef unsigned long  __kernel_size_t;
+typedef long           __kernel_ssize_t;
+typedef long           __kernel_ptrdiff_t;
+#endif
+
+#endif /* __ASM_H8300_BITS_PER_LONG */
diff --git a/tools/arch/hexagon/include/uapi/asm/bitsperlong.h b/tools/arch/hexagon/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..4a65815
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __ASM_HEXAGON_BITSPERLONG_H
+#define __ASM_HEXAGON_BITSPERLONG_H
+
+#define __BITS_PER_LONG 32
+
+#include <asm-generic/bitsperlong.h>
+
+#endif
diff --git a/tools/arch/ia64/include/uapi/asm/bitsperlong.h b/tools/arch/ia64/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..ec4db3c
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef __ASM_IA64_BITSPERLONG_H
+#define __ASM_IA64_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_IA64_BITSPERLONG_H */
diff --git a/tools/arch/m32r/include/uapi/asm/bitsperlong.h b/tools/arch/m32r/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..6dc0bb0
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
diff --git a/tools/arch/microblaze/include/uapi/asm/bitsperlong.h b/tools/arch/microblaze/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..6dc0bb0
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
diff --git a/tools/arch/mips/include/uapi/asm/bitsperlong.h b/tools/arch/mips/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..3e4c10a
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef __ASM_MIPS_BITSPERLONG_H
+#define __ASM_MIPS_BITSPERLONG_H
+
+#define __BITS_PER_LONG _MIPS_SZLONG
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_MIPS_BITSPERLONG_H */
diff --git a/tools/arch/mips/include/uapi/asm/kvm.h b/tools/arch/mips/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..6985eb5
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Cavium, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+/*
+ * KVM MIPS specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ *
+ * If Config[AT] is zero (32-bit CPU), the register contents are
+ * stored in the lower 32-bits of the struct kvm_regs fields and sign
+ * extended to 64-bits.
+ */
+struct kvm_regs {
+       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+       __u64 gpr[32];
+       __u64 hi;
+       __u64 lo;
+       __u64 pc;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ */
+struct kvm_fpu {
+};
+
+
+/*
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
+ * registers.  The id field is broken down as follows:
+ *
+ *  bits[63..52] - As per linux/kvm.h
+ *  bits[51..32] - Must be zero.
+ *  bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CP0 registers.
+ *  bits[15..8]  - Must be zero.
+ *  bits[7..3]   - Register 'rd'  index.
+ *  bits[2..0]   - Register 'sel' index.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / MSA registers (see definitions below).
+ *
+ * Other sets registers may be added in the future.  Each set would
+ * have its own identifier in bits[31..16].
+ */
+
+#define KVM_REG_MIPS_GP                (KVM_REG_MIPS | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_CP0       (KVM_REG_MIPS | 0x0000000000010000ULL)
+#define KVM_REG_MIPS_KVM       (KVM_REG_MIPS | 0x0000000000020000ULL)
+#define KVM_REG_MIPS_FPU       (KVM_REG_MIPS | 0x0000000000030000ULL)
+
+
+/*
+ * KVM_REG_MIPS_GP - General purpose registers from kvm_regs.
+ */
+
+#define KVM_REG_MIPS_R0                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  0)
+#define KVM_REG_MIPS_R1                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  1)
+#define KVM_REG_MIPS_R2                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  2)
+#define KVM_REG_MIPS_R3                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  3)
+#define KVM_REG_MIPS_R4                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  4)
+#define KVM_REG_MIPS_R5                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  5)
+#define KVM_REG_MIPS_R6                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  6)
+#define KVM_REG_MIPS_R7                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  7)
+#define KVM_REG_MIPS_R8                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  8)
+#define KVM_REG_MIPS_R9                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  9)
+#define KVM_REG_MIPS_R10       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31       (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_MIPS_HI                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC                (KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)
+
+
+/*
+ * KVM_REG_MIPS_KVM - KVM specific control registers.
+ */
+
+/*
+ * CP0_Count control
+ * DC:    Set 0: Master disable CP0_Count and set COUNT_RESUME to now
+ *        Set 1: Master re-enable CP0_Count with unchanged bias, handling timer
+ *               interrupts since COUNT_RESUME
+ *        This can be used to freeze the timer to get a consistent snapshot of
+ *        the CP0_Count and timer interrupt pending state, while also resuming
+ *        safely without losing time or guest timer interrupts.
+ * Other: Reserved, do not change.
+ */
+#define KVM_REG_MIPS_COUNT_CTL     (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_COUNT_CTL_DC      0x00000001
+
+/*
+ * CP0_Count resume monotonic nanoseconds
+ * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master
+ * disable). Any reads and writes of Count related registers while
+ * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is
+ * cleared again (master enable) any timer interrupts since this time will be
+ * emulated.
+ * Modifications to times in the future are rejected.
+ */
+#define KVM_REG_MIPS_COUNT_RESUME   (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1)
+/*
+ * CP0_Count rate in Hz
+ * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
+ * discontinuities in CP0_Count.
+ */
+#define KVM_REG_MIPS_COUNT_HZ      (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2)
+
+
+/*
+ * KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers.
+ *
+ *  bits[15..8]  - Register subset (see definitions below).
+ *  bits[7..5]   - Must be zero.
+ *  bits[4..0]   - Register number within register subset.
+ */
+
+#define KVM_REG_MIPS_FPR       (KVM_REG_MIPS_FPU | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_FCR       (KVM_REG_MIPS_FPU | 0x0000000000000100ULL)
+#define KVM_REG_MIPS_MSACR     (KVM_REG_MIPS_FPU | 0x0000000000000200ULL)
+
+/*
+ * KVM_REG_MIPS_FPR - Floating point / Vector registers.
+ */
+#define KVM_REG_MIPS_FPR_32(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32  | (n))
+#define KVM_REG_MIPS_FPR_64(n) (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64  | (n))
+#define KVM_REG_MIPS_VEC_128(n)        (KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n))
+
+/*
+ * KVM_REG_MIPS_FCR - Floating point control registers.
+ */
+#define KVM_REG_MIPS_FCR_IR    (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 |  0)
+#define KVM_REG_MIPS_FCR_CSR   (KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31)
+
+/*
+ * KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers.
+ */
+#define KVM_REG_MIPS_MSA_IR     (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 |  0)
+#define KVM_REG_MIPS_MSA_CSR    (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 |  1)
+
+
+/*
+ * KVM MIPS specific structures and definitions
+ *
+ */
+struct kvm_debug_exit_arch {
+       __u64 epc;
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+struct kvm_mips_interrupt {
+       /* in */
+       __u32 cpu;
+       __u32 irq;
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
diff --git a/tools/arch/mn10300/include/uapi/asm/bitsperlong.h b/tools/arch/mn10300/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..6dc0bb0
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
diff --git a/tools/arch/parisc/include/uapi/asm/bitsperlong.h b/tools/arch/parisc/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..e0a23c7
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __ASM_PARISC_BITSPERLONG_H
+#define __ASM_PARISC_BITSPERLONG_H
+
+#if defined(__LP64__)
+#define __BITS_PER_LONG 64
+#define SHIFT_PER_LONG 6
+#else
+#define __BITS_PER_LONG 32
+#define SHIFT_PER_LONG 5
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_PARISC_BITSPERLONG_H */
diff --git a/tools/arch/powerpc/include/uapi/asm/bitsperlong.h b/tools/arch/powerpc/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..5f16590
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef __ASM_POWERPC_BITSPERLONG_H
+#define __ASM_POWERPC_BITSPERLONG_H
+
+#if defined(__powerpc64__)
+# define __BITS_PER_LONG 64
+#else
+# define __BITS_PER_LONG 32
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_POWERPC_BITSPERLONG_H */
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..c93cf35
--- /dev/null
@@ -0,0 +1,612 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __LINUX_KVM_POWERPC_H
+#define __LINUX_KVM_POWERPC_H
+
+#include <linux/types.h>
+
+/* Select powerpc specific features in <linux/kvm.h> */
+#define __KVM_HAVE_SPAPR_TCE
+#define __KVM_HAVE_PPC_SMT
+#define __KVM_HAVE_IRQCHIP
+#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_GUEST_DEBUG
+
+struct kvm_regs {
+       __u64 pc;
+       __u64 cr;
+       __u64 ctr;
+       __u64 lr;
+       __u64 xer;
+       __u64 msr;
+       __u64 srr0;
+       __u64 srr1;
+       __u64 pid;
+
+       __u64 sprg0;
+       __u64 sprg1;
+       __u64 sprg2;
+       __u64 sprg3;
+       __u64 sprg4;
+       __u64 sprg5;
+       __u64 sprg6;
+       __u64 sprg7;
+
+       __u64 gpr[32];
+};
+
+#define KVM_SREGS_E_IMPL_NONE  0
+#define KVM_SREGS_E_IMPL_FSL   1
+
+#define KVM_SREGS_E_FSL_PIDn   (1 << 0) /* PID1/PID2 */
+
+/*
+ * Feature bits indicate which sections of the sregs struct are valid,
+ * both in KVM_GET_SREGS and KVM_SET_SREGS.  On KVM_SET_SREGS, registers
+ * corresponding to unset feature bits will not be modified.  This allows
+ * restoring a checkpoint made without that feature, while keeping the
+ * default values of the new registers.
+ *
+ * KVM_SREGS_E_BASE contains:
+ * CSRR0/1 (refers to SRR2/3 on 40x)
+ * ESR
+ * DEAR
+ * MCSR
+ * TSR
+ * TCR
+ * DEC
+ * TB
+ * VRSAVE (USPRG0)
+ */
+#define KVM_SREGS_E_BASE               (1 << 0)
+
+/*
+ * KVM_SREGS_E_ARCH206 contains:
+ *
+ * PIR
+ * MCSRR0/1
+ * DECAR
+ * IVPR
+ */
+#define KVM_SREGS_E_ARCH206            (1 << 1)
+
+/*
+ * Contains EPCR, plus the upper half of 64-bit registers
+ * that are 32-bit on 32-bit implementations.
+ */
+#define KVM_SREGS_E_64                 (1 << 2)
+
+#define KVM_SREGS_E_SPRG8              (1 << 3)
+#define KVM_SREGS_E_MCIVPR             (1 << 4)
+
+/*
+ * IVORs are used -- contains IVOR0-15, plus additional IVORs
+ * in combination with an appropriate feature bit.
+ */
+#define KVM_SREGS_E_IVOR               (1 << 5)
+
+/*
+ * Contains MAS0-4, MAS6-7, TLBnCFG, MMUCFG.
+ * Also TLBnPS if MMUCFG[MAVN] = 1.
+ */
+#define KVM_SREGS_E_ARCH206_MMU                (1 << 6)
+
+/* DBSR, DBCR, IAC, DAC, DVC */
+#define KVM_SREGS_E_DEBUG              (1 << 7)
+
+/* Enhanced debug -- DSRR0/1, SPRG9 */
+#define KVM_SREGS_E_ED                 (1 << 8)
+
+/* Embedded Floating Point (SPE) -- IVOR32-34 if KVM_SREGS_E_IVOR */
+#define KVM_SREGS_E_SPE                        (1 << 9)
+
+/*
+ * DEPRECATED! USE ONE_REG FOR THIS ONE!
+ * External Proxy (EXP) -- EPR
+ */
+#define KVM_SREGS_EXP                  (1 << 10)
+
+/* External PID (E.PD) -- EPSC/EPLC */
+#define KVM_SREGS_E_PD                 (1 << 11)
+
+/* Processor Control (E.PC) -- IVOR36-37 if KVM_SREGS_E_IVOR */
+#define KVM_SREGS_E_PC                 (1 << 12)
+
+/* Page table (E.PT) -- EPTCFG */
+#define KVM_SREGS_E_PT                 (1 << 13)
+
+/* Embedded Performance Monitor (E.PM) -- IVOR35 if KVM_SREGS_E_IVOR */
+#define KVM_SREGS_E_PM                 (1 << 14)
+
+/*
+ * Special updates:
+ *
+ * Some registers may change even while a vcpu is not running.
+ * To avoid losing these changes, by default these registers are
+ * not updated by KVM_SET_SREGS.  To force an update, set the bit
+ * in u.e.update_special corresponding to the register to be updated.
+ *
+ * The update_special field is zero on return from KVM_GET_SREGS.
+ *
+ * When restoring a checkpoint, the caller can set update_special
+ * to 0xffffffff to ensure that everything is restored, even new features
+ * that the caller doesn't know about.
+ */
+#define KVM_SREGS_E_UPDATE_MCSR                (1 << 0)
+#define KVM_SREGS_E_UPDATE_TSR         (1 << 1)
+#define KVM_SREGS_E_UPDATE_DEC         (1 << 2)
+#define KVM_SREGS_E_UPDATE_DBSR                (1 << 3)
+
+/*
+ * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
+ * previous KVM_GET_REGS.
+ *
+ * Unless otherwise indicated, setting any register with KVM_SET_SREGS
+ * directly sets its value.  It does not trigger any special semantics such
+ * as write-one-to-clear.  Calling KVM_SET_SREGS on an unmodified struct
+ * just received from KVM_GET_SREGS is always a no-op.
+ */
+struct kvm_sregs {
+       __u32 pvr;
+       union {
+               struct {
+                       __u64 sdr1;
+                       struct {
+                               struct {
+                                       __u64 slbe;
+                                       __u64 slbv;
+                               } slb[64];
+                       } ppc64;
+                       struct {
+                               __u32 sr[16];
+                               __u64 ibat[8];
+                               __u64 dbat[8];
+                       } ppc32;
+               } s;
+               struct {
+                       union {
+                               struct { /* KVM_SREGS_E_IMPL_FSL */
+                                       __u32 features; /* KVM_SREGS_E_FSL_ */
+                                       __u32 svr;
+                                       __u64 mcar;
+                                       __u32 hid0;
+
+                                       /* KVM_SREGS_E_FSL_PIDn */
+                                       __u32 pid1, pid2;
+                               } fsl;
+                               __u8 pad[256];
+                       } impl;
+
+                       __u32 features; /* KVM_SREGS_E_ */
+                       __u32 impl_id;  /* KVM_SREGS_E_IMPL_ */
+                       __u32 update_special; /* KVM_SREGS_E_UPDATE_ */
+                       __u32 pir;      /* read-only */
+                       __u64 sprg8;
+                       __u64 sprg9;    /* E.ED */
+                       __u64 csrr0;
+                       __u64 dsrr0;    /* E.ED */
+                       __u64 mcsrr0;
+                       __u32 csrr1;
+                       __u32 dsrr1;    /* E.ED */
+                       __u32 mcsrr1;
+                       __u32 esr;
+                       __u64 dear;
+                       __u64 ivpr;
+                       __u64 mcivpr;
+                       __u64 mcsr;     /* KVM_SREGS_E_UPDATE_MCSR */
+
+                       __u32 tsr;      /* KVM_SREGS_E_UPDATE_TSR */
+                       __u32 tcr;
+                       __u32 decar;
+                       __u32 dec;      /* KVM_SREGS_E_UPDATE_DEC */
+
+                       /*
+                        * Userspace can read TB directly, but the
+                        * value reported here is consistent with "dec".
+                        *
+                        * Read-only.
+                        */
+                       __u64 tb;
+
+                       __u32 dbsr;     /* KVM_SREGS_E_UPDATE_DBSR */
+                       __u32 dbcr[3];
+                       /*
+                        * iac/dac registers are 64bit wide, while this API
+                        * interface provides only lower 32 bits on 64 bit
+                        * processors. ONE_REG interface is added for 64bit
+                        * iac/dac registers.
+                        */
+                       __u32 iac[4];
+                       __u32 dac[2];
+                       __u32 dvc[2];
+                       __u8 num_iac;   /* read-only */
+                       __u8 num_dac;   /* read-only */
+                       __u8 num_dvc;   /* read-only */
+                       __u8 pad;
+
+                       __u32 epr;      /* EXP */
+                       __u32 vrsave;   /* a.k.a. USPRG0 */
+                       __u32 epcr;     /* KVM_SREGS_E_64 */
+
+                       __u32 mas0;
+                       __u32 mas1;
+                       __u64 mas2;
+                       __u64 mas7_3;
+                       __u32 mas4;
+                       __u32 mas6;
+
+                       __u32 ivor_low[16]; /* IVOR0-15 */
+                       __u32 ivor_high[18]; /* IVOR32+, plus room to expand */
+
+                       __u32 mmucfg;   /* read-only */
+                       __u32 eptcfg;   /* E.PT, read-only */
+                       __u32 tlbcfg[4];/* read-only */
+                       __u32 tlbps[4]; /* read-only */
+
+                       __u32 eplc, epsc; /* E.PD */
+               } e;
+               __u8 pad[1020];
+       } u;
+};
+
+struct kvm_fpu {
+       __u64 fpr[32];
+};
+
+/*
+ * Defines for h/w breakpoint, watchpoint (read, write or both) and
+ * software breakpoint.
+ * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
+ * for KVM_DEBUG_EXIT.
+ */
+#define KVMPPC_DEBUG_NONE              0x0
+#define KVMPPC_DEBUG_BREAKPOINT                (1UL << 1)
+#define KVMPPC_DEBUG_WATCH_WRITE       (1UL << 2)
+#define KVMPPC_DEBUG_WATCH_READ                (1UL << 3)
+struct kvm_debug_exit_arch {
+       __u64 address;
+       /*
+        * exiting to userspace because of h/w breakpoint, watchpoint
+        * (read, write or both) and software breakpoint.
+        */
+       __u32 status;
+       __u32 reserved;
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+       struct {
+               /* H/W breakpoint/watchpoint address */
+               __u64 addr;
+               /*
+                * Type denotes h/w breakpoint, read watchpoint, write
+                * watchpoint or watchpoint (both read and write).
+                */
+               __u32 type;
+               __u32 reserved;
+       } bp[16];
+};
+
+/* Debug related defines */
+/*
+ * kvm_guest_debug->control is a 32 bit field. The lower 16 bits are generic
+ * and upper 16 bits are architecture specific. Architecture specific defines
+ * that ioctl is for setting hardware breakpoint or software breakpoint.
+ */
+#define KVM_GUESTDBG_USE_SW_BP         0x00010000
+#define KVM_GUESTDBG_USE_HW_BP         0x00020000
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+#define KVM_INTERRUPT_SET      -1U
+#define KVM_INTERRUPT_UNSET    -2U
+#define KVM_INTERRUPT_SET_LEVEL        -3U
+
+#define KVM_CPU_440            1
+#define KVM_CPU_E500V2         2
+#define KVM_CPU_3S_32          3
+#define KVM_CPU_3S_64          4
+#define KVM_CPU_E500MC         5
+
+/* for KVM_CAP_SPAPR_TCE */
+struct kvm_create_spapr_tce {
+       __u64 liobn;
+       __u32 window_size;
+};
+
+/* for KVM_CAP_SPAPR_TCE_64 */
+struct kvm_create_spapr_tce_64 {
+       __u64 liobn;
+       __u32 page_shift;
+       __u32 flags;
+       __u64 offset;   /* in pages */
+       __u64 size;     /* in pages */
+};
+
+/* for KVM_ALLOCATE_RMA */
+struct kvm_allocate_rma {
+       __u64 rma_size;
+};
+
+/* for KVM_CAP_PPC_RTAS */
+struct kvm_rtas_token_args {
+       char name[120];
+       __u64 token;    /* Use a token of 0 to undefine a mapping */
+};
+
+struct kvm_book3e_206_tlb_entry {
+       __u32 mas8;
+       __u32 mas1;
+       __u64 mas2;
+       __u64 mas7_3;
+};
+
+struct kvm_book3e_206_tlb_params {
+       /*
+        * For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
+        *
+        * - The number of ways of TLB0 must be a power of two between 2 and
+        *   16.
+        * - TLB1 must be fully associative.
+        * - The size of TLB0 must be a multiple of the number of ways, and
+        *   the number of sets must be a power of two.
+        * - The size of TLB1 may not exceed 64 entries.
+        * - TLB0 supports 4 KiB pages.
+        * - The page sizes supported by TLB1 are as indicated by
+        *   TLB1CFG (if MMUCFG[MAVN] = 0) or TLB1PS (if MMUCFG[MAVN] = 1)
+        *   as returned by KVM_GET_SREGS.
+        * - TLB2 and TLB3 are reserved, and their entries in tlb_sizes[]
+        *   and tlb_ways[] must be zero.
+        *
+        * tlb_ways[n] = tlb_sizes[n] means the array is fully associative.
+        *
+        * KVM will adjust TLBnCFG based on the sizes configured here,
+        * though arrays greater than 2048 entries will have TLBnCFG[NENTRY]
+        * set to zero.
+        */
+       __u32 tlb_sizes[4];
+       __u32 tlb_ways[4];
+       __u32 reserved[8];
+};
+
+/* For KVM_PPC_GET_HTAB_FD */
+struct kvm_get_htab_fd {
+       __u64   flags;
+       __u64   start_index;
+       __u64   reserved[2];
+};
+
+/* Values for kvm_get_htab_fd.flags */
+#define KVM_GET_HTAB_BOLTED_ONLY       ((__u64)0x1)
+#define KVM_GET_HTAB_WRITE             ((__u64)0x2)
+
+/*
+ * Data read on the file descriptor is formatted as a series of
+ * records, each consisting of a header followed by a series of
+ * `n_valid' HPTEs (16 bytes each), which are all valid.  Following
+ * those valid HPTEs there are `n_invalid' invalid HPTEs, which
+ * are not represented explicitly in the stream.  The same format
+ * is used for writing.
+ */
+struct kvm_get_htab_header {
+       __u32   index;
+       __u16   n_valid;
+       __u16   n_invalid;
+};
+
+/* Per-vcpu XICS interrupt controller state */
+#define KVM_REG_PPC_ICP_STATE  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)
+
+#define  KVM_REG_PPC_ICP_CPPR_SHIFT    56      /* current proc priority */
+#define  KVM_REG_PPC_ICP_CPPR_MASK     0xff
+#define  KVM_REG_PPC_ICP_XISR_SHIFT    32      /* interrupt status field */
+#define  KVM_REG_PPC_ICP_XISR_MASK     0xffffff
+#define  KVM_REG_PPC_ICP_MFRR_SHIFT    24      /* pending IPI priority */
+#define  KVM_REG_PPC_ICP_MFRR_MASK     0xff
+#define  KVM_REG_PPC_ICP_PPRI_SHIFT    16      /* pending irq priority */
+#define  KVM_REG_PPC_ICP_PPRI_MASK     0xff
+
+/* Device control API: PPC-specific devices */
+#define KVM_DEV_MPIC_GRP_MISC          1
+#define   KVM_DEV_MPIC_BASE_ADDR       0       /* 64-bit */
+
+#define KVM_DEV_MPIC_GRP_REGISTER      2       /* 32-bit */
+#define KVM_DEV_MPIC_GRP_IRQ_ACTIVE    3       /* 32-bit */
+
+/* One-Reg API: PPC-specific registers */
+#define KVM_REG_PPC_HIOR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
+#define KVM_REG_PPC_IAC1       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2)
+#define KVM_REG_PPC_IAC2       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3)
+#define KVM_REG_PPC_IAC3       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x4)
+#define KVM_REG_PPC_IAC4       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x5)
+#define KVM_REG_PPC_DAC1       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x6)
+#define KVM_REG_PPC_DAC2       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x7)
+#define KVM_REG_PPC_DABR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8)
+#define KVM_REG_PPC_DSCR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9)
+#define KVM_REG_PPC_PURR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa)
+#define KVM_REG_PPC_SPURR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb)
+#define KVM_REG_PPC_DAR                (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc)
+#define KVM_REG_PPC_DSISR      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd)
+#define KVM_REG_PPC_AMR                (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xe)
+#define KVM_REG_PPC_UAMOR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xf)
+
+#define KVM_REG_PPC_MMCR0      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
+#define KVM_REG_PPC_MMCR1      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
+#define KVM_REG_PPC_MMCRA      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
+#define KVM_REG_PPC_MMCR2      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
+#define KVM_REG_PPC_MMCRS      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
+#define KVM_REG_PPC_SIAR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
+#define KVM_REG_PPC_SDAR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
+#define KVM_REG_PPC_SIER       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)
+
+#define KVM_REG_PPC_PMC1       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
+#define KVM_REG_PPC_PMC2       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
+#define KVM_REG_PPC_PMC3       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1a)
+#define KVM_REG_PPC_PMC4       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1b)
+#define KVM_REG_PPC_PMC5       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1c)
+#define KVM_REG_PPC_PMC6       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1d)
+#define KVM_REG_PPC_PMC7       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1e)
+#define KVM_REG_PPC_PMC8       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1f)
+
+/* 32 floating-point registers */
+#define KVM_REG_PPC_FPR0       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x20)
+#define KVM_REG_PPC_FPR(n)     (KVM_REG_PPC_FPR0 + (n))
+#define KVM_REG_PPC_FPR31      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3f)
+
+/* 32 VMX/Altivec vector registers */
+#define KVM_REG_PPC_VR0                (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x40)
+#define KVM_REG_PPC_VR(n)      (KVM_REG_PPC_VR0 + (n))
+#define KVM_REG_PPC_VR31       (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x5f)
+
+/* 32 double-width FP registers for VSX */
+/* High-order halves overlap with FP regs */
+#define KVM_REG_PPC_VSR0       (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x60)
+#define KVM_REG_PPC_VSR(n)     (KVM_REG_PPC_VSR0 + (n))
+#define KVM_REG_PPC_VSR31      (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x7f)
+
+/* FP and vector status/control registers */
+#define KVM_REG_PPC_FPSCR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
+/*
+ * VSCR register is documented as a 32-bit register in the ISA, but it can
+ * only be accesses via a vector register. Expose VSCR as a 32-bit register
+ * even though the kernel represents it as a 128-bit vector.
+ */
+#define KVM_REG_PPC_VSCR       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)
+
+/* Virtual processor areas */
+/* For SLB & DTL, address in high (first) half, length in low half */
+#define KVM_REG_PPC_VPA_ADDR   (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x82)
+#define KVM_REG_PPC_VPA_SLB    (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x83)
+#define KVM_REG_PPC_VPA_DTL    (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84)
+
+#define KVM_REG_PPC_EPCR       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85)
+#define KVM_REG_PPC_EPR                (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x86)
+
+/* Timer Status Register OR/CLEAR interface */
+#define KVM_REG_PPC_OR_TSR     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x87)
+#define KVM_REG_PPC_CLEAR_TSR  (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x88)
+#define KVM_REG_PPC_TCR                (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x89)
+#define KVM_REG_PPC_TSR                (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8a)
+
+/* Debugging: Special instruction for software breakpoint */
+#define KVM_REG_PPC_DEBUG_INST (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8b)
+
+/* MMU registers */
+#define KVM_REG_PPC_MAS0       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8c)
+#define KVM_REG_PPC_MAS1       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8d)
+#define KVM_REG_PPC_MAS2       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8e)
+#define KVM_REG_PPC_MAS7_3     (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8f)
+#define KVM_REG_PPC_MAS4       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x90)
+#define KVM_REG_PPC_MAS6       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x91)
+#define KVM_REG_PPC_MMUCFG     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x92)
+/*
+ * TLBnCFG fields TLBnCFG_N_ENTRY and TLBnCFG_ASSOC can be changed only using
+ * KVM_CAP_SW_TLB ioctl
+ */
+#define KVM_REG_PPC_TLB0CFG    (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x93)
+#define KVM_REG_PPC_TLB1CFG    (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x94)
+#define KVM_REG_PPC_TLB2CFG    (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x95)
+#define KVM_REG_PPC_TLB3CFG    (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x96)
+#define KVM_REG_PPC_TLB0PS     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x97)
+#define KVM_REG_PPC_TLB1PS     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x98)
+#define KVM_REG_PPC_TLB2PS     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x99)
+#define KVM_REG_PPC_TLB3PS     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
+#define KVM_REG_PPC_EPTCFG     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)
+
+/* Timebase offset */
+#define KVM_REG_PPC_TB_OFFSET  (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)
+
+/* POWER8 registers */
+#define KVM_REG_PPC_SPMC1      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
+#define KVM_REG_PPC_SPMC2      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
+#define KVM_REG_PPC_IAMR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
+#define KVM_REG_PPC_TFHAR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
+#define KVM_REG_PPC_TFIAR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
+#define KVM_REG_PPC_TEXASR     (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
+#define KVM_REG_PPC_FSCR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
+#define KVM_REG_PPC_PSPB       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
+#define KVM_REG_PPC_EBBHR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
+#define KVM_REG_PPC_EBBRR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
+#define KVM_REG_PPC_BESCR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
+#define KVM_REG_PPC_TAR                (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
+#define KVM_REG_PPC_DPDES      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
+#define KVM_REG_PPC_DAWR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
+#define KVM_REG_PPC_DAWRX      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
+#define KVM_REG_PPC_CIABR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
+#define KVM_REG_PPC_IC         (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
+#define KVM_REG_PPC_VTB                (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
+#define KVM_REG_PPC_CSIGR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
+#define KVM_REG_PPC_TACR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
+#define KVM_REG_PPC_TCSCR      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
+#define KVM_REG_PPC_PID                (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
+#define KVM_REG_PPC_ACOP       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
+
+#define KVM_REG_PPC_VRSAVE     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
+#define KVM_REG_PPC_LPCR       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
+#define KVM_REG_PPC_LPCR_64    (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb5)
+#define KVM_REG_PPC_PPR                (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)
+
+/* Architecture compatibility level */
+#define KVM_REG_PPC_ARCH_COMPAT        (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
+
+#define KVM_REG_PPC_DABRX      (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
+#define KVM_REG_PPC_WORT       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
+#define KVM_REG_PPC_SPRG9      (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
+#define KVM_REG_PPC_DBSR       (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)
+
+/* Transactional Memory checkpointed state:
+ * This is all GPRs, all VSX regs and a subset of SPRs
+ */
+#define KVM_REG_PPC_TM         (KVM_REG_PPC | 0x80000000)
+/* TM GPRs */
+#define KVM_REG_PPC_TM_GPR0    (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_PPC_TM_GPR(n)  (KVM_REG_PPC_TM_GPR0 + (n))
+#define KVM_REG_PPC_TM_GPR31   (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
+/* TM VSX */
+#define KVM_REG_PPC_TM_VSR0    (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
+#define KVM_REG_PPC_TM_VSR(n)  (KVM_REG_PPC_TM_VSR0 + (n))
+#define KVM_REG_PPC_TM_VSR63   (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
+/* TM SPRS */
+#define KVM_REG_PPC_TM_CR      (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
+#define KVM_REG_PPC_TM_LR      (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
+#define KVM_REG_PPC_TM_CTR     (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
+#define KVM_REG_PPC_TM_FPSCR   (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
+#define KVM_REG_PPC_TM_AMR     (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
+#define KVM_REG_PPC_TM_PPR     (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
+#define KVM_REG_PPC_TM_VRSAVE  (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
+#define KVM_REG_PPC_TM_VSCR    (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
+#define KVM_REG_PPC_TM_DSCR    (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
+#define KVM_REG_PPC_TM_TAR     (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)
+
+/* PPC64 eXternal Interrupt Controller Specification */
+#define KVM_DEV_XICS_GRP_SOURCES       1       /* 64-bit source attributes */
+
+/* Layout of 64-bit source attribute values */
+#define  KVM_XICS_DESTINATION_SHIFT    0
+#define  KVM_XICS_DESTINATION_MASK     0xffffffffULL
+#define  KVM_XICS_PRIORITY_SHIFT       32
+#define  KVM_XICS_PRIORITY_MASK                0xff
+#define  KVM_XICS_LEVEL_SENSITIVE      (1ULL << 40)
+#define  KVM_XICS_MASKED               (1ULL << 41)
+#define  KVM_XICS_PENDING              (1ULL << 42)
+
+#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..6a93209
--- /dev/null
@@ -0,0 +1,50 @@
+#ifndef _UAPI_ASM_POWERPC_PERF_REGS_H
+#define _UAPI_ASM_POWERPC_PERF_REGS_H
+
+enum perf_event_powerpc_regs {
+       PERF_REG_POWERPC_R0,
+       PERF_REG_POWERPC_R1,
+       PERF_REG_POWERPC_R2,
+       PERF_REG_POWERPC_R3,
+       PERF_REG_POWERPC_R4,
+       PERF_REG_POWERPC_R5,
+       PERF_REG_POWERPC_R6,
+       PERF_REG_POWERPC_R7,
+       PERF_REG_POWERPC_R8,
+       PERF_REG_POWERPC_R9,
+       PERF_REG_POWERPC_R10,
+       PERF_REG_POWERPC_R11,
+       PERF_REG_POWERPC_R12,
+       PERF_REG_POWERPC_R13,
+       PERF_REG_POWERPC_R14,
+       PERF_REG_POWERPC_R15,
+       PERF_REG_POWERPC_R16,
+       PERF_REG_POWERPC_R17,
+       PERF_REG_POWERPC_R18,
+       PERF_REG_POWERPC_R19,
+       PERF_REG_POWERPC_R20,
+       PERF_REG_POWERPC_R21,
+       PERF_REG_POWERPC_R22,
+       PERF_REG_POWERPC_R23,
+       PERF_REG_POWERPC_R24,
+       PERF_REG_POWERPC_R25,
+       PERF_REG_POWERPC_R26,
+       PERF_REG_POWERPC_R27,
+       PERF_REG_POWERPC_R28,
+       PERF_REG_POWERPC_R29,
+       PERF_REG_POWERPC_R30,
+       PERF_REG_POWERPC_R31,
+       PERF_REG_POWERPC_NIP,
+       PERF_REG_POWERPC_MSR,
+       PERF_REG_POWERPC_ORIG_R3,
+       PERF_REG_POWERPC_CTR,
+       PERF_REG_POWERPC_LINK,
+       PERF_REG_POWERPC_XER,
+       PERF_REG_POWERPC_CCR,
+       PERF_REG_POWERPC_SOFTE,
+       PERF_REG_POWERPC_TRAP,
+       PERF_REG_POWERPC_DAR,
+       PERF_REG_POWERPC_DSISR,
+       PERF_REG_POWERPC_MAX,
+};
+#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
diff --git a/tools/arch/s390/include/uapi/asm/bitsperlong.h b/tools/arch/s390/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..e351ea2
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef __ASM_S390_BITSPERLONG_H
+#define __ASM_S390_BITSPERLONG_H
+
+#ifndef __s390x__
+#define __BITS_PER_LONG 32
+#else
+#define __BITS_PER_LONG 64
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_S390_BITSPERLONG_H */
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..3b8e99e
--- /dev/null
@@ -0,0 +1,192 @@
+#ifndef __LINUX_KVM_S390_H
+#define __LINUX_KVM_S390_H
+/*
+ * KVM s390 specific structures and definitions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *               Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+#include <linux/types.h>
+
+#define __KVM_S390
+#define __KVM_HAVE_GUEST_DEBUG
+
+/* Device control API: s390-specific devices */
+#define KVM_DEV_FLIC_GET_ALL_IRQS      1
+#define KVM_DEV_FLIC_ENQUEUE           2
+#define KVM_DEV_FLIC_CLEAR_IRQS                3
+#define KVM_DEV_FLIC_APF_ENABLE                4
+#define KVM_DEV_FLIC_APF_DISABLE_WAIT  5
+#define KVM_DEV_FLIC_ADAPTER_REGISTER  6
+#define KVM_DEV_FLIC_ADAPTER_MODIFY    7
+#define KVM_DEV_FLIC_CLEAR_IO_IRQ      8
+/*
+ * We can have up to 4*64k pending subchannels + 8 adapter interrupts,
+ * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
+ * There are also sclp and machine checks. This gives us
+ * sizeof(kvm_s390_irq)*(4*65536+8+64*64+1+1) = 72 * 266250 = 19170000
+ * Let's round up to 8192 pages.
+ */
+#define KVM_S390_MAX_FLOAT_IRQS        266250
+#define KVM_S390_FLIC_MAX_BUFFER       0x2000000
+
+struct kvm_s390_io_adapter {
+       __u32 id;
+       __u8 isc;
+       __u8 maskable;
+       __u8 swap;
+       __u8 pad;
+};
+
+#define KVM_S390_IO_ADAPTER_MASK 1
+#define KVM_S390_IO_ADAPTER_MAP 2
+#define KVM_S390_IO_ADAPTER_UNMAP 3
+
+struct kvm_s390_io_adapter_req {
+       __u32 id;
+       __u8 type;
+       __u8 mask;
+       __u16 pad0;
+       __u64 addr;
+};
+
+/* kvm attr_group  on vm fd */
+#define KVM_S390_VM_MEM_CTRL           0
+#define KVM_S390_VM_TOD                        1
+#define KVM_S390_VM_CRYPTO             2
+#define KVM_S390_VM_CPU_MODEL          3
+
+/* kvm attributes for mem_ctrl */
+#define KVM_S390_VM_MEM_ENABLE_CMMA    0
+#define KVM_S390_VM_MEM_CLR_CMMA       1
+#define KVM_S390_VM_MEM_LIMIT_SIZE     2
+
+#define KVM_S390_NO_MEM_LIMIT          U64_MAX
+
+/* kvm attributes for KVM_S390_VM_TOD */
+#define KVM_S390_VM_TOD_LOW            0
+#define KVM_S390_VM_TOD_HIGH           1
+
+/* kvm attributes for KVM_S390_VM_CPU_MODEL */
+/* processor related attributes are r/w */
+#define KVM_S390_VM_CPU_PROCESSOR      0
+struct kvm_s390_vm_cpu_processor {
+       __u64 cpuid;
+       __u16 ibc;
+       __u8  pad[6];
+       __u64 fac_list[256];
+};
+
+/* machine related attributes are r/o */
+#define KVM_S390_VM_CPU_MACHINE                1
+struct kvm_s390_vm_cpu_machine {
+       __u64 cpuid;
+       __u32 ibc;
+       __u8  pad[4];
+       __u64 fac_mask[256];
+       __u64 fac_list[256];
+};
+
+/* kvm attributes for crypto */
+#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW       0
+#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW       1
+#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW      2
+#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW      3
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+       /* general purpose regs for s390 */
+       __u64 gprs[16];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+       __u32 acrs[16];
+       __u64 crs[16];
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+       __u32 fpc;
+       __u64 fprs[16];
+};
+
+#define KVM_GUESTDBG_USE_HW_BP         0x00010000
+
+#define KVM_HW_BP                      1
+#define KVM_HW_WP_WRITE                        2
+#define KVM_SINGLESTEP                 4
+
+struct kvm_debug_exit_arch {
+       __u64 addr;
+       __u8 type;
+       __u8 pad[7]; /* Should be set to 0 */
+};
+
+struct kvm_hw_breakpoint {
+       __u64 addr;
+       __u64 phys_addr;
+       __u64 len;
+       __u8 type;
+       __u8 pad[7]; /* Should be set to 0 */
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+       __u32 nr_hw_bp;
+       __u32 pad; /* Should be set to 0 */
+       struct kvm_hw_breakpoint __user *hw_bp;
+};
+
+/* for KVM_SYNC_PFAULT and KVM_REG_S390_PFTOKEN */
+#define KVM_S390_PFAULT_TOKEN_INVALID  0xffffffffffffffffULL
+
+#define KVM_SYNC_PREFIX (1UL << 0)
+#define KVM_SYNC_GPRS   (1UL << 1)
+#define KVM_SYNC_ACRS   (1UL << 2)
+#define KVM_SYNC_CRS    (1UL << 3)
+#define KVM_SYNC_ARCH0  (1UL << 4)
+#define KVM_SYNC_PFAULT (1UL << 5)
+#define KVM_SYNC_VRS    (1UL << 6)
+#define KVM_SYNC_RICCB  (1UL << 7)
+#define KVM_SYNC_FPRS   (1UL << 8)
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+       __u64 prefix;   /* prefix register */
+       __u64 gprs[16]; /* general purpose registers */
+       __u32 acrs[16]; /* access registers */
+       __u64 crs[16];  /* control registers */
+       __u64 todpr;    /* tod programmable register [ARCH0] */
+       __u64 cputm;    /* cpu timer [ARCH0] */
+       __u64 ckc;      /* clock comparator [ARCH0] */
+       __u64 pp;       /* program parameter [ARCH0] */
+       __u64 gbea;     /* guest breaking-event address [ARCH0] */
+       __u64 pft;      /* pfault token [PFAULT] */
+       __u64 pfs;      /* pfault select [PFAULT] */
+       __u64 pfc;      /* pfault compare [PFAULT] */
+       union {
+               __u64 vrs[32][2];       /* vector registers (KVM_SYNC_VRS) */
+               __u64 fprs[16];         /* fp registers (KVM_SYNC_FPRS) */
+       };
+       __u8  reserved[512];    /* for future vector expansion */
+       __u32 fpc;              /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
+       __u8 padding[52];       /* riccb needs to be 64byte aligned */
+       __u8 riccb[64];         /* runtime instrumentation controls block */
+};
+
+#define KVM_REG_S390_TODPR     (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
+#define KVM_REG_S390_EPOCHDIFF (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x2)
+#define KVM_REG_S390_CPU_TIMER  (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x3)
+#define KVM_REG_S390_CLOCK_COMP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x4)
+#define KVM_REG_S390_PFTOKEN   (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x5)
+#define KVM_REG_S390_PFCOMPARE (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x6)
+#define KVM_REG_S390_PFSELECT  (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x7)
+#define KVM_REG_S390_PP                (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x8)
+#define KVM_REG_S390_GBEA      (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x9)
+#endif
diff --git a/tools/arch/s390/include/uapi/asm/kvm_perf.h b/tools/arch/s390/include/uapi/asm/kvm_perf.h
new file mode 100644 (file)
index 0000000..3972827
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Definitions for perf-kvm on s390
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_KVM_PERF_S390_H
+#define __LINUX_KVM_PERF_S390_H
+
+#include <asm/sie.h>
+
+#define DECODE_STR_LEN 40
+
+#define VCPU_ID "id"
+
+#define KVM_ENTRY_TRACE "kvm:kvm_s390_sie_enter"
+#define KVM_EXIT_TRACE "kvm:kvm_s390_sie_exit"
+#define KVM_EXIT_REASON "icptcode"
+
+#endif
diff --git a/tools/arch/s390/include/uapi/asm/sie.h b/tools/arch/s390/include/uapi/asm/sie.h
new file mode 100644 (file)
index 0000000..8fb5d4a
--- /dev/null
@@ -0,0 +1,250 @@
+#ifndef _UAPI_ASM_S390_SIE_H
+#define _UAPI_ASM_S390_SIE_H
+
+#define diagnose_codes                                         \
+       { 0x10, "DIAG (0x10) release pages" },                  \
+       { 0x44, "DIAG (0x44) time slice end" },                 \
+       { 0x9c, "DIAG (0x9c) time slice end directed" },        \
+       { 0x204, "DIAG (0x204) logical-cpu utilization" },      \
+       { 0x258, "DIAG (0x258) page-reference services" },      \
+       { 0x288, "DIAG (0x288) watchdog functions" },           \
+       { 0x308, "DIAG (0x308) ipl functions" },                \
+       { 0x500, "DIAG (0x500) KVM virtio functions" },         \
+       { 0x501, "DIAG (0x501) KVM breakpoint" }
+
+#define sigp_order_codes                                       \
+       { 0x01, "SIGP sense" },                                 \
+       { 0x02, "SIGP external call" },                         \
+       { 0x03, "SIGP emergency signal" },                      \
+       { 0x04, "SIGP start" },                                 \
+       { 0x05, "SIGP stop" },                                  \
+       { 0x06, "SIGP restart" },                               \
+       { 0x09, "SIGP stop and store status" },                 \
+       { 0x0b, "SIGP initial cpu reset" },                     \
+       { 0x0c, "SIGP cpu reset" },                             \
+       { 0x0d, "SIGP set prefix" },                            \
+       { 0x0e, "SIGP store status at address" },               \
+       { 0x12, "SIGP set architecture" },                      \
+       { 0x13, "SIGP conditional emergency signal" },          \
+       { 0x15, "SIGP sense running" },                         \
+       { 0x16, "SIGP set multithreading"},                     \
+       { 0x17, "SIGP store additional status ait address"}
+
+#define icpt_prog_codes                                                \
+       { 0x0001, "Prog Operation" },                           \
+       { 0x0002, "Prog Privileged Operation" },                \
+       { 0x0003, "Prog Execute" },                             \
+       { 0x0004, "Prog Protection" },                          \
+       { 0x0005, "Prog Addressing" },                          \
+       { 0x0006, "Prog Specification" },                       \
+       { 0x0007, "Prog Data" },                                \
+       { 0x0008, "Prog Fixedpoint overflow" },                 \
+       { 0x0009, "Prog Fixedpoint divide" },                   \
+       { 0x000A, "Prog Decimal overflow" },                    \
+       { 0x000B, "Prog Decimal divide" },                      \
+       { 0x000C, "Prog HFP exponent overflow" },               \
+       { 0x000D, "Prog HFP exponent underflow" },              \
+       { 0x000E, "Prog HFP significance" },                    \
+       { 0x000F, "Prog HFP divide" },                          \
+       { 0x0010, "Prog Segment translation" },                 \
+       { 0x0011, "Prog Page translation" },                    \
+       { 0x0012, "Prog Translation specification" },           \
+       { 0x0013, "Prog Special operation" },                   \
+       { 0x0015, "Prog Operand" },                             \
+       { 0x0016, "Prog Trace table" },                         \
+       { 0x0017, "Prog ASNtranslation specification" },        \
+       { 0x001C, "Prog Spaceswitch event" },                   \
+       { 0x001D, "Prog HFP square root" },                     \
+       { 0x001F, "Prog PCtranslation specification" },         \
+       { 0x0020, "Prog AFX translation" },                     \
+       { 0x0021, "Prog ASX translation" },                     \
+       { 0x0022, "Prog LX translation" },                      \
+       { 0x0023, "Prog EX translation" },                      \
+       { 0x0024, "Prog Primary authority" },                   \
+       { 0x0025, "Prog Secondary authority" },                 \
+       { 0x0026, "Prog LFXtranslation exception" },            \
+       { 0x0027, "Prog LSXtranslation exception" },            \
+       { 0x0028, "Prog ALET specification" },                  \
+       { 0x0029, "Prog ALEN translation" },                    \
+       { 0x002A, "Prog ALE sequence" },                        \
+       { 0x002B, "Prog ASTE validity" },                       \
+       { 0x002C, "Prog ASTE sequence" },                       \
+       { 0x002D, "Prog Extended authority" },                  \
+       { 0x002E, "Prog LSTE sequence" },                       \
+       { 0x002F, "Prog ASTE instance" },                       \
+       { 0x0030, "Prog Stack full" },                          \
+       { 0x0031, "Prog Stack empty" },                         \
+       { 0x0032, "Prog Stack specification" },                 \
+       { 0x0033, "Prog Stack type" },                          \
+       { 0x0034, "Prog Stack operation" },                     \
+       { 0x0039, "Prog Region first translation" },            \
+       { 0x003A, "Prog Region second translation" },           \
+       { 0x003B, "Prog Region third translation" },            \
+       { 0x0040, "Prog Monitor event" },                       \
+       { 0x0080, "Prog PER event" },                           \
+       { 0x0119, "Prog Crypto operation" }
+
+#define exit_code_ipa0(ipa0, opcode, mnemonic)         \
+       { (ipa0 << 8 | opcode), #ipa0 " " mnemonic }
+#define exit_code(opcode, mnemonic)                    \
+       { opcode, mnemonic }
+
+#define icpt_insn_codes                                \
+       exit_code_ipa0(0x01, 0x01, "PR"),       \
+       exit_code_ipa0(0x01, 0x04, "PTFF"),     \
+       exit_code_ipa0(0x01, 0x07, "SCKPF"),    \
+       exit_code_ipa0(0xAA, 0x00, "RINEXT"),   \
+       exit_code_ipa0(0xAA, 0x01, "RION"),     \
+       exit_code_ipa0(0xAA, 0x02, "TRIC"),     \
+       exit_code_ipa0(0xAA, 0x03, "RIOFF"),    \
+       exit_code_ipa0(0xAA, 0x04, "RIEMIT"),   \
+       exit_code_ipa0(0xB2, 0x02, "STIDP"),    \
+       exit_code_ipa0(0xB2, 0x04, "SCK"),      \
+       exit_code_ipa0(0xB2, 0x05, "STCK"),     \
+       exit_code_ipa0(0xB2, 0x06, "SCKC"),     \
+       exit_code_ipa0(0xB2, 0x07, "STCKC"),    \
+       exit_code_ipa0(0xB2, 0x08, "SPT"),      \
+       exit_code_ipa0(0xB2, 0x09, "STPT"),     \
+       exit_code_ipa0(0xB2, 0x0d, "PTLB"),     \
+       exit_code_ipa0(0xB2, 0x10, "SPX"),      \
+       exit_code_ipa0(0xB2, 0x11, "STPX"),     \
+       exit_code_ipa0(0xB2, 0x12, "STAP"),     \
+       exit_code_ipa0(0xB2, 0x14, "SIE"),      \
+       exit_code_ipa0(0xB2, 0x16, "SETR"),     \
+       exit_code_ipa0(0xB2, 0x17, "STETR"),    \
+       exit_code_ipa0(0xB2, 0x18, "PC"),       \
+       exit_code_ipa0(0xB2, 0x20, "SERVC"),    \
+       exit_code_ipa0(0xB2, 0x21, "IPTE"),     \
+       exit_code_ipa0(0xB2, 0x28, "PT"),       \
+       exit_code_ipa0(0xB2, 0x29, "ISKE"),     \
+       exit_code_ipa0(0xB2, 0x2a, "RRBE"),     \
+       exit_code_ipa0(0xB2, 0x2b, "SSKE"),     \
+       exit_code_ipa0(0xB2, 0x2c, "TB"),       \
+       exit_code_ipa0(0xB2, 0x2e, "PGIN"),     \
+       exit_code_ipa0(0xB2, 0x2f, "PGOUT"),    \
+       exit_code_ipa0(0xB2, 0x30, "CSCH"),     \
+       exit_code_ipa0(0xB2, 0x31, "HSCH"),     \
+       exit_code_ipa0(0xB2, 0x32, "MSCH"),     \
+       exit_code_ipa0(0xB2, 0x33, "SSCH"),     \
+       exit_code_ipa0(0xB2, 0x34, "STSCH"),    \
+       exit_code_ipa0(0xB2, 0x35, "TSCH"),     \
+       exit_code_ipa0(0xB2, 0x36, "TPI"),      \
+       exit_code_ipa0(0xB2, 0x37, "SAL"),      \
+       exit_code_ipa0(0xB2, 0x38, "RSCH"),     \
+       exit_code_ipa0(0xB2, 0x39, "STCRW"),    \
+       exit_code_ipa0(0xB2, 0x3a, "STCPS"),    \
+       exit_code_ipa0(0xB2, 0x3b, "RCHP"),     \
+       exit_code_ipa0(0xB2, 0x3c, "SCHM"),     \
+       exit_code_ipa0(0xB2, 0x40, "BAKR"),     \
+       exit_code_ipa0(0xB2, 0x48, "PALB"),     \
+       exit_code_ipa0(0xB2, 0x4c, "TAR"),      \
+       exit_code_ipa0(0xB2, 0x50, "CSP"),      \
+       exit_code_ipa0(0xB2, 0x54, "MVPG"),     \
+       exit_code_ipa0(0xB2, 0x58, "BSG"),      \
+       exit_code_ipa0(0xB2, 0x5a, "BSA"),      \
+       exit_code_ipa0(0xB2, 0x5f, "CHSC"),     \
+       exit_code_ipa0(0xB2, 0x74, "SIGA"),     \
+       exit_code_ipa0(0xB2, 0x76, "XSCH"),     \
+       exit_code_ipa0(0xB2, 0x78, "STCKE"),    \
+       exit_code_ipa0(0xB2, 0x7c, "STCKF"),    \
+       exit_code_ipa0(0xB2, 0x7d, "STSI"),     \
+       exit_code_ipa0(0xB2, 0xb0, "STFLE"),    \
+       exit_code_ipa0(0xB2, 0xb1, "STFL"),     \
+       exit_code_ipa0(0xB2, 0xb2, "LPSWE"),    \
+       exit_code_ipa0(0xB2, 0xf8, "TEND"),     \
+       exit_code_ipa0(0xB2, 0xfc, "TABORT"),   \
+       exit_code_ipa0(0xB9, 0x1e, "KMAC"),     \
+       exit_code_ipa0(0xB9, 0x28, "PCKMO"),    \
+       exit_code_ipa0(0xB9, 0x2a, "KMF"),      \
+       exit_code_ipa0(0xB9, 0x2b, "KMO"),      \
+       exit_code_ipa0(0xB9, 0x2d, "KMCTR"),    \
+       exit_code_ipa0(0xB9, 0x2e, "KM"),       \
+       exit_code_ipa0(0xB9, 0x2f, "KMC"),      \
+       exit_code_ipa0(0xB9, 0x3e, "KIMD"),     \
+       exit_code_ipa0(0xB9, 0x3f, "KLMD"),     \
+       exit_code_ipa0(0xB9, 0x8a, "CSPG"),     \
+       exit_code_ipa0(0xB9, 0x8d, "EPSW"),     \
+       exit_code_ipa0(0xB9, 0x8e, "IDTE"),     \
+       exit_code_ipa0(0xB9, 0x8f, "CRDTE"),    \
+       exit_code_ipa0(0xB9, 0x9c, "EQBS"),     \
+       exit_code_ipa0(0xB9, 0xa2, "PTF"),      \
+       exit_code_ipa0(0xB9, 0xab, "ESSA"),     \
+       exit_code_ipa0(0xB9, 0xae, "RRBM"),     \
+       exit_code_ipa0(0xB9, 0xaf, "PFMF"),     \
+       exit_code_ipa0(0xE3, 0x03, "LRAG"),     \
+       exit_code_ipa0(0xE3, 0x13, "LRAY"),     \
+       exit_code_ipa0(0xE3, 0x25, "NTSTG"),    \
+       exit_code_ipa0(0xE5, 0x00, "LASP"),     \
+       exit_code_ipa0(0xE5, 0x01, "TPROT"),    \
+       exit_code_ipa0(0xE5, 0x60, "TBEGIN"),   \
+       exit_code_ipa0(0xE5, 0x61, "TBEGINC"),  \
+       exit_code_ipa0(0xEB, 0x25, "STCTG"),    \
+       exit_code_ipa0(0xEB, 0x2f, "LCTLG"),    \
+       exit_code_ipa0(0xEB, 0x60, "LRIC"),     \
+       exit_code_ipa0(0xEB, 0x61, "STRIC"),    \
+       exit_code_ipa0(0xEB, 0x62, "MRIC"),     \
+       exit_code_ipa0(0xEB, 0x8a, "SQBS"),     \
+       exit_code_ipa0(0xC8, 0x01, "ECTG"),     \
+       exit_code(0x0a, "SVC"),                 \
+       exit_code(0x80, "SSM"),                 \
+       exit_code(0x82, "LPSW"),                \
+       exit_code(0x83, "DIAG"),                \
+       exit_code(0xae, "SIGP"),                \
+       exit_code(0xac, "STNSM"),               \
+       exit_code(0xad, "STOSM"),               \
+       exit_code(0xb1, "LRA"),                 \
+       exit_code(0xb6, "STCTL"),               \
+       exit_code(0xb7, "LCTL"),                \
+       exit_code(0xee, "PLO")
+
+#define sie_intercept_code                                     \
+       { 0x00, "Host interruption" },                          \
+       { 0x04, "Instruction" },                                \
+       { 0x08, "Program interruption" },                       \
+       { 0x0c, "Instruction and program interruption" },       \
+       { 0x10, "External request" },                           \
+       { 0x14, "External interruption" },                      \
+       { 0x18, "I/O request" },                                \
+       { 0x1c, "Wait state" },                                 \
+       { 0x20, "Validity" },                                   \
+       { 0x28, "Stop request" },                               \
+       { 0x2c, "Operation exception" },                        \
+       { 0x38, "Partial-execution" },                          \
+       { 0x3c, "I/O interruption" },                           \
+       { 0x40, "I/O instruction" },                            \
+       { 0x48, "Timing subset" }
+
+/*
+ * This is a simple decoder for interceptable instructions.
+ *
+ * It will be used as a userspace interface and it can be used in
+ * places that do not allow the use of general decoder functions,
+ * such as trace event declarations.
+ *
+ * Some userspace tools may want to parse this code
+ * and would be confused by switch(), if() and other statements,
+ * but they can understand the conditional operator.
+ */
+#define INSN_DECODE_IPA0(ipa0, insn, rshift, mask)             \
+       (insn >> 56) == (ipa0) ?                                \
+               ((ipa0 << 8) | ((insn >> rshift) & mask)) :
+
+#define INSN_DECODE(insn) (insn >> 56)
+
+/*
+ * The macro icpt_insn_decoder() takes an intercepted instruction
+ * and returns a key, which can be used to find a mnemonic name
+ * of the instruction in the icpt_insn_codes table.
+ */
+#define icpt_insn_decoder(insn) (              \
+       INSN_DECODE_IPA0(0x01, insn, 48, 0xff)  \
+       INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f)  \
+       INSN_DECODE_IPA0(0xb2, insn, 48, 0xff)  \
+       INSN_DECODE_IPA0(0xb9, insn, 48, 0xff)  \
+       INSN_DECODE_IPA0(0xe3, insn, 48, 0xff)  \
+       INSN_DECODE_IPA0(0xe5, insn, 48, 0xff)  \
+       INSN_DECODE_IPA0(0xeb, insn, 16, 0xff)  \
+       INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f)  \
+       INSN_DECODE(insn))
+
+#endif /* _UAPI_ASM_S390_SIE_H */
diff --git a/tools/arch/score/include/uapi/asm/bitsperlong.h b/tools/arch/score/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..86ff337
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _ASM_SCORE_BITSPERLONG_H
+#define _ASM_SCORE_BITSPERLONG_H
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* _ASM_SCORE_BITSPERLONG_H */
diff --git a/tools/arch/sparc/include/uapi/asm/bitsperlong.h b/tools/arch/sparc/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..b62dd90
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef __ASM_ALPHA_BITSPERLONG_H
+#define __ASM_ALPHA_BITSPERLONG_H
+
+#if defined(__sparc__) && defined(__arch64__)
+#define __BITS_PER_LONG 64
+#else
+#define __BITS_PER_LONG 32
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_ALPHA_BITSPERLONG_H */
diff --git a/tools/arch/tile/include/uapi/asm/bitsperlong.h b/tools/arch/tile/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..58c771f
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful, but
+ *   WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *   NON INFRINGEMENT.  See the GNU General Public License for
+ *   more details.
+ */
+
+#ifndef _ASM_TILE_BITSPERLONG_H
+#define _ASM_TILE_BITSPERLONG_H
+
+#ifdef __LP64__
+# define __BITS_PER_LONG 64
+#else
+# define __BITS_PER_LONG 32
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* _ASM_TILE_BITSPERLONG_H */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
new file mode 100644 (file)
index 0000000..4a41348
--- /dev/null
@@ -0,0 +1,316 @@
+#ifndef _ASM_X86_CPUFEATURES_H
+#define _ASM_X86_CPUFEATURES_H
+
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
+#include <asm/required-features.h>
+#endif
+
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#include <asm/disabled-features.h>
+#endif
+
+/*
+ * Defines x86 CPU feature bits
+ */
+#define NCAPINTS       18      /* N 32-bit words worth of info */
+#define NBUGINTS       1       /* N 32-bit bug flags */
+
+/*
+ * Note: If the comment begins with a quoted string, that string is used
+ * in /proc/cpuinfo instead of the macro name.  If the string is "",
+ * this feature bit is not displayed in /proc/cpuinfo at all.
+ */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
+#define X86_FEATURE_FPU                ( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME                ( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE         ( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE                ( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC                ( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR                ( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_PAE                ( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE                ( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8                ( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC       ( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP                ( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR       ( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE                ( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA                ( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV       ( 0*32+15) /* CMOV instructions */
+                                         /* (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT                ( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36      ( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN         ( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLUSH    ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_DS         ( 0*32+21) /* "dts" Debug Store */
+#define X86_FEATURE_ACPI       ( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX                ( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR       ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM                ( 0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2       ( 0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP  ( 0*32+27) /* "ss" CPU self snoop */
+#define X86_FEATURE_HT         ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC                ( 0*32+29) /* "tm" Automatic clock control */
+#define X86_FEATURE_IA64       ( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE                ( 0*32+31) /* Pending Break Enable */
+
+/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* Don't duplicate feature flags which are redundant with Intel! */
+#define X86_FEATURE_SYSCALL    ( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP         ( 1*32+19) /* MP Capable. */
+#define X86_FEATURE_NX         ( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT     ( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT   ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES    ( 1*32+26) /* "pdpe1gb" GB pages */
+#define X86_FEATURE_RDTSCP     ( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM         ( 1*32+29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT   ( 1*32+30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW      ( 1*32+31) /* 3DNow! */
+
+/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+#define X86_FEATURE_RECOVERY   ( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN    ( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI       ( 2*32+ 3) /* LongRun table interface */
+
+/* Other features, Linux-defined mapping, word 3 */
+/* This range is used for feature bits which conflict or are synthesized */
+#define X86_FEATURE_CXMMX      ( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR    ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR  ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR        ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+/* cpu types for specific tunings: */
+#define X86_FEATURE_K8         ( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7         ( 3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3         ( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4         ( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP         ( 3*32+ 9) /* smp kernel running on up */
+#define X86_FEATURE_ART                ( 3*32+10) /* Platform has always running timer (ART) */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS       ( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS                ( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32  ( 3*32+14) /* "" syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
+#define X86_FEATURE_REP_GOOD   ( 3*32+16) /* rep microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
+#define X86_FEATURE_ACC_POWER  ( 3*32+19) /* AMD Accumulated Power Mechanism */
+#define X86_FEATURE_NOPL       ( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS     ( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_XTOPOLOGY  ( 3*32+22) /* cpu topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC        ( 3*32+24) /* TSC does not stop in C states */
+/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
+#define X86_FEATURE_EXTD_APICID        ( 3*32+26) /* has extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
+#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
+#define X86_FEATURE_EAGER_FPU  ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_MCE_RECOVERY ( 3*32+31) /* cpu has recoverable machine checks */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+#define X86_FEATURE_XMM3       ( 4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ  ( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64     ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT      ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
+#define X86_FEATURE_DSCPL      ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
+#define X86_FEATURE_VMX                ( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX                ( 4*32+ 6) /* Safer mode */
+#define X86_FEATURE_EST                ( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2                ( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3      ( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_CID                ( 4*32+10) /* Context ID */
+#define X86_FEATURE_SDBG       ( 4*32+11) /* Silicon Debug */
+#define X86_FEATURE_FMA                ( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_CX16       ( 4*32+13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR       ( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM       ( 4*32+15) /* Performance Capabilities */
+#define X86_FEATURE_PCID       ( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_DCA                ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1     ( 4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2     ( 4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC     ( 4*32+21) /* x2APIC */
+#define X86_FEATURE_MOVBE      ( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT      ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
+#define X86_FEATURE_AES                ( 4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE      ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_OSXSAVE    ( 4*32+27) /* "" XSAVE enabled in the OS */
+#define X86_FEATURE_AVX                ( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C       ( 4*32+29) /* 16-bit fp conversions */
+#define X86_FEATURE_RDRAND     ( 4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_XSTORE     ( 5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN  ( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT     ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN  ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+#define X86_FEATURE_ACE2       ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN    ( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE                ( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN     ( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM                ( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN     ( 5*32+13) /* PMM enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+#define X86_FEATURE_LAHF_LM    ( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVM                ( 6*32+ 2) /* Secure virtual machine */
+#define X86_FEATURE_EXTAPIC    ( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM                ( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A      ( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW       ( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS                ( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_XOP                ( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SKINIT     ( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT                ( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP                ( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4       ( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE                ( 6*32+17) /* translation cache extension */
+#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM                ( 6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT    ( 6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT      (6*32+26) /* data breakpoint extension */
+#define X86_FEATURE_PTSC       ( 6*32+27) /* performance time-stamp counter */
+#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
+#define X86_FEATURE_MWAITX     ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
+
+/*
+ * Auxiliary flags: Linux defined - For features scattered in various
+ * CPUID levels like 0x6, 0xA etc, word 7.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+
+#define X86_FEATURE_CPB                ( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB                ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+
+#define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+
+#define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
+
+/* Virtualization flags: Linux defined, word 8 */
+#define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT         ( 8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID        ( 8*32+ 4) /* Intel Virtual Processor ID */
+
+#define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */
+#define X86_FEATURE_XENPV       ( 8*32+16) /* "" Xen paravirtual guest */
+
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+#define X86_FEATURE_FSGSBASE   ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
+#define X86_FEATURE_BMI1       ( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE                ( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2       ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP       ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2       ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS       ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+#define X86_FEATURE_INVPCID    ( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM                ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM                ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_MPX                ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_AVX512F    ( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_AVX512DQ   ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
+#define X86_FEATURE_RDSEED     ( 9*32+18) /* The RDSEED instruction */
+#define X86_FEATURE_ADX                ( 9*32+19) /* The ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP       ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_PCOMMIT    ( 9*32+22) /* PCOMMIT instruction */
+#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB       ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_AVX512PF   ( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER   ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD   ( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI     ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+#define X86_FEATURE_AVX512BW   ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
+#define X86_FEATURE_AVX512VL   ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
+
+/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
+#define X86_FEATURE_XSAVEOPT   (10*32+ 0) /* XSAVEOPT */
+#define X86_FEATURE_XSAVEC     (10*32+ 1) /* XSAVEC */
+#define X86_FEATURE_XGETBV1    (10*32+ 2) /* XGETBV with ECX = 1 */
+#define X86_FEATURE_XSAVES     (10*32+ 3) /* XSAVES/XRSTORS */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
+#define X86_FEATURE_CQM_LLC    (11*32+ 1) /* LLC QoS if 1 */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
+#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
+#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
+
+/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
+#define X86_FEATURE_CLZERO     (13*32+0) /* CLZERO instruction */
+#define X86_FEATURE_IRPERF     (13*32+1) /* Instructions Retired Count */
+
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+#define X86_FEATURE_DTHERM     (14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA                (14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT       (14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN                (14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS                (14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP                (14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP    (14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
+#define X86_FEATURE_NPT                (15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV       (15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML       (15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS      (15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR  (15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN   (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
+#define X86_FEATURE_AVIC       (15*32+13) /* Virtual Interrupt Controller */
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
+#define X86_FEATURE_PKU                (16*32+ 3) /* Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE      (16*32+ 4) /* OS Protection Keys Enable */
+
+/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
+#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
+#define X86_FEATURE_SUCCOR     (17*32+1) /* Uncorrectable error containment and recovery */
+#define X86_FEATURE_SMCA       (17*32+3) /* Scalable MCA */
+
+/*
+ * BUG word(s)
+ */
+#define X86_BUG(x)             (NCAPINTS*32 + (x))
+
+#define X86_BUG_F00F           X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV           X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA           X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E   X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
+#define X86_BUG_11AP           X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK    X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_CLFLUSH_MONITOR        X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS        X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#define X86_BUG_NULL_SEG       X86_BUG(9) /* Nulling a selector preserves the base */
+#define X86_BUG_SWAPGS_FENCE   X86_BUG(10) /* SWAPGS without input dep on GS */
+
+
+#ifdef CONFIG_X86_32
+/*
+ * 64-bit kernels don't use X86_BUG_ESPFIX.  Make the define conditional
+ * to avoid confusion.
+ */
+#define X86_BUG_ESPFIX         X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
+#endif
+
+#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
new file mode 100644 (file)
index 0000000..911e935
--- /dev/null
@@ -0,0 +1,60 @@
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#define _ASM_X86_DISABLED_FEATURES_H
+
+/* These features, although they might be available in a CPU
+ * will not be used because the compile options to support
+ * them are not present.
+ *
+ * This code allows them to be checked and disabled at
+ * compile time without an explicit #ifdef.  Use
+ * cpu_feature_enabled().
+ */
+
+#ifdef CONFIG_X86_INTEL_MPX
+# define DISABLE_MPX   0
+#else
+# define DISABLE_MPX   (1<<(X86_FEATURE_MPX & 31))
+#endif
+
+#ifdef CONFIG_X86_64
+# define DISABLE_VME           (1<<(X86_FEATURE_VME & 31))
+# define DISABLE_K6_MTRR       (1<<(X86_FEATURE_K6_MTRR & 31))
+# define DISABLE_CYRIX_ARR     (1<<(X86_FEATURE_CYRIX_ARR & 31))
+# define DISABLE_CENTAUR_MCR   (1<<(X86_FEATURE_CENTAUR_MCR & 31))
+#else
+# define DISABLE_VME           0
+# define DISABLE_K6_MTRR       0
+# define DISABLE_CYRIX_ARR     0
+# define DISABLE_CENTAUR_MCR   0
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+# define DISABLE_PKU           0
+# define DISABLE_OSPKE         0
+#else
+# define DISABLE_PKU           (1<<(X86_FEATURE_PKU & 31))
+# define DISABLE_OSPKE         (1<<(X86_FEATURE_OSPKE & 31))
+#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
+
+/*
+ * Make sure to add features to the correct mask
+ */
+#define DISABLED_MASK0 (DISABLE_VME)
+#define DISABLED_MASK1 0
+#define DISABLED_MASK2 0
+#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
+#define DISABLED_MASK4 0
+#define DISABLED_MASK5 0
+#define DISABLED_MASK6 0
+#define DISABLED_MASK7 0
+#define DISABLED_MASK8 0
+#define DISABLED_MASK9 (DISABLE_MPX)
+#define DISABLED_MASK10        0
+#define DISABLED_MASK11        0
+#define DISABLED_MASK12        0
+#define DISABLED_MASK13        0
+#define DISABLED_MASK14        0
+#define DISABLED_MASK15        0
+#define DISABLED_MASK16        (DISABLE_PKU|DISABLE_OSPKE)
+
+#endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
new file mode 100644 (file)
index 0000000..4916144
--- /dev/null
@@ -0,0 +1,103 @@
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
+#define _ASM_X86_REQUIRED_FEATURES_H
+
+/* Define minimum CPUID feature set for kernel These bits are checked
+   really early to actually display a visible error message before the
+   kernel dies.  Make sure to assign features to the proper mask!
+
+   Some requirements that are not in CPUID yet are also in the
+   CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
+
+   The real information is in arch/x86/Kconfig.cpu, this just converts
+   the CONFIGs into a bitmask */
+
+#ifndef CONFIG_MATH_EMULATION
+# define NEED_FPU      (1<<(X86_FEATURE_FPU & 31))
+#else
+# define NEED_FPU      0
+#endif
+
+#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
+# define NEED_PAE      (1<<(X86_FEATURE_PAE & 31))
+#else
+# define NEED_PAE      0
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+# define NEED_CX8      (1<<(X86_FEATURE_CX8 & 31))
+#else
+# define NEED_CX8      0
+#endif
+
+#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64)
+# define NEED_CMOV     (1<<(X86_FEATURE_CMOV & 31))
+#else
+# define NEED_CMOV     0
+#endif
+
+#ifdef CONFIG_X86_USE_3DNOW
+# define NEED_3DNOW    (1<<(X86_FEATURE_3DNOW & 31))
+#else
+# define NEED_3DNOW    0
+#endif
+
+#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
+# define NEED_NOPL     (1<<(X86_FEATURE_NOPL & 31))
+#else
+# define NEED_NOPL     0
+#endif
+
+#ifdef CONFIG_MATOM
+# define NEED_MOVBE    (1<<(X86_FEATURE_MOVBE & 31))
+#else
+# define NEED_MOVBE    0
+#endif
+
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_PARAVIRT
+/* Paravirtualized systems may not have PSE or PGE available */
+#define NEED_PSE       0
+#define NEED_PGE       0
+#else
+#define NEED_PSE       (1<<(X86_FEATURE_PSE) & 31)
+#define NEED_PGE       (1<<(X86_FEATURE_PGE) & 31)
+#endif
+#define NEED_MSR       (1<<(X86_FEATURE_MSR & 31))
+#define NEED_FXSR      (1<<(X86_FEATURE_FXSR & 31))
+#define NEED_XMM       (1<<(X86_FEATURE_XMM & 31))
+#define NEED_XMM2      (1<<(X86_FEATURE_XMM2 & 31))
+#define NEED_LM                (1<<(X86_FEATURE_LM & 31))
+#else
+#define NEED_PSE       0
+#define NEED_MSR       0
+#define NEED_PGE       0
+#define NEED_FXSR      0
+#define NEED_XMM       0
+#define NEED_XMM2      0
+#define NEED_LM                0
+#endif
+
+#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
+                        NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
+                        NEED_XMM|NEED_XMM2)
+#define SSE_MASK       (NEED_XMM|NEED_XMM2)
+
+#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
+
+#define REQUIRED_MASK2 0
+#define REQUIRED_MASK3 (NEED_NOPL)
+#define REQUIRED_MASK4 (NEED_MOVBE)
+#define REQUIRED_MASK5 0
+#define REQUIRED_MASK6 0
+#define REQUIRED_MASK7 0
+#define REQUIRED_MASK8 0
+#define REQUIRED_MASK9 0
+#define REQUIRED_MASK10        0
+#define REQUIRED_MASK11        0
+#define REQUIRED_MASK12        0
+#define REQUIRED_MASK13        0
+#define REQUIRED_MASK14        0
+#define REQUIRED_MASK15        0
+#define REQUIRED_MASK16        0
+
+#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/unistd_32.h b/tools/arch/x86/include/asm/unistd_32.h
new file mode 100644 (file)
index 0000000..88b3f8c
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef __NR_perf_event_open
+# define __NR_perf_event_open 336
+#endif
+#ifndef __NR_futex
+# define __NR_futex 240
+#endif
+#ifndef __NR_gettid
+# define __NR_gettid 224
+#endif
+#ifndef __NR_getcpu
+# define __NR_getcpu 318
+#endif
diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/asm/unistd_64.h
new file mode 100644 (file)
index 0000000..fbdb70e
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef __NR_perf_event_open
+# define __NR_perf_event_open 298
+#endif
+#ifndef __NR_futex
+# define __NR_futex 202
+#endif
+#ifndef __NR_gettid
+# define __NR_gettid 186
+#endif
+#ifndef __NR_getcpu
+# define __NR_getcpu 309
+#endif
diff --git a/tools/arch/x86/include/uapi/asm/bitsperlong.h b/tools/arch/x86/include/uapi/asm/bitsperlong.h
new file mode 100644 (file)
index 0000000..6e23c54
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef __ASM_X86_BITSPERLONG_H
+#define __ASM_X86_BITSPERLONG_H
+
+#if defined(__x86_64__) && !defined(__ILP32__)
+# define __BITS_PER_LONG 64
+#else
+# define __BITS_PER_LONG 32
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_X86_BITSPERLONG_H */
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
new file mode 100644 (file)
index 0000000..739c0c5
--- /dev/null
@@ -0,0 +1,360 @@
+#ifndef _ASM_X86_KVM_H
+#define _ASM_X86_KVM_H
+
+/*
+ * KVM x86 specific structures and definitions
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define DE_VECTOR 0
+#define DB_VECTOR 1
+#define BP_VECTOR 3
+#define OF_VECTOR 4
+#define BR_VECTOR 5
+#define UD_VECTOR 6
+#define NM_VECTOR 7
+#define DF_VECTOR 8
+#define TS_VECTOR 10
+#define NP_VECTOR 11
+#define SS_VECTOR 12
+#define GP_VECTOR 13
+#define PF_VECTOR 14
+#define MF_VECTOR 16
+#define AC_VECTOR 17
+#define MC_VECTOR 18
+#define XM_VECTOR 19
+#define VE_VECTOR 20
+
+/* Select x86 specific features in <linux/kvm.h> */
+#define __KVM_HAVE_PIT
+#define __KVM_HAVE_IOAPIC
+#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_MSI
+#define __KVM_HAVE_USER_NMI
+#define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_MSIX
+#define __KVM_HAVE_MCE
+#define __KVM_HAVE_PIT_STATE2
+#define __KVM_HAVE_XEN_HVM
+#define __KVM_HAVE_VCPU_EVENTS
+#define __KVM_HAVE_DEBUGREGS
+#define __KVM_HAVE_XSAVE
+#define __KVM_HAVE_XCRS
+#define __KVM_HAVE_READONLY_MEM
+
+/* Architectural interrupt line count. */
+#define KVM_NR_INTERRUPTS 256
+
+struct kvm_memory_alias {
+       __u32 slot;  /* this has a different namespace than memory slots */
+       __u32 flags;
+       __u64 guest_phys_addr;
+       __u64 memory_size;
+       __u64 target_phys_addr;
+};
+
+/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
+struct kvm_pic_state {
+       __u8 last_irr;  /* edge detection */
+       __u8 irr;               /* interrupt request register */
+       __u8 imr;               /* interrupt mask register */
+       __u8 isr;               /* interrupt service register */
+       __u8 priority_add;      /* highest irq priority */
+       __u8 irq_base;
+       __u8 read_reg_select;
+       __u8 poll;
+       __u8 special_mask;
+       __u8 init_state;
+       __u8 auto_eoi;
+       __u8 rotate_on_auto_eoi;
+       __u8 special_fully_nested_mode;
+       __u8 init4;             /* true if 4 byte init */
+       __u8 elcr;              /* PIIX edge/trigger selection */
+       __u8 elcr_mask;
+};
+
+#define KVM_IOAPIC_NUM_PINS  24
+struct kvm_ioapic_state {
+       __u64 base_address;
+       __u32 ioregsel;
+       __u32 id;
+       __u32 irr;
+       __u32 pad;
+       union {
+               __u64 bits;
+               struct {
+                       __u8 vector;
+                       __u8 delivery_mode:3;
+                       __u8 dest_mode:1;
+                       __u8 delivery_status:1;
+                       __u8 polarity:1;
+                       __u8 remote_irr:1;
+                       __u8 trig_mode:1;
+                       __u8 mask:1;
+                       __u8 reserve:7;
+                       __u8 reserved[4];
+                       __u8 dest_id;
+               } fields;
+       } redirtbl[KVM_IOAPIC_NUM_PINS];
+};
+
+#define KVM_IRQCHIP_PIC_MASTER   0
+#define KVM_IRQCHIP_PIC_SLAVE    1
+#define KVM_IRQCHIP_IOAPIC       2
+#define KVM_NR_IRQCHIPS          3
+
+#define KVM_RUN_X86_SMM                 (1 << 0)
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+       __u64 rax, rbx, rcx, rdx;
+       __u64 rsi, rdi, rsp, rbp;
+       __u64 r8,  r9,  r10, r11;
+       __u64 r12, r13, r14, r15;
+       __u64 rip, rflags;
+};
+
+/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
+#define KVM_APIC_REG_SIZE 0x400
+struct kvm_lapic_state {
+       char regs[KVM_APIC_REG_SIZE];
+};
+
+struct kvm_segment {
+       __u64 base;
+       __u32 limit;
+       __u16 selector;
+       __u8  type;
+       __u8  present, dpl, db, s, l, g, avl;
+       __u8  unusable;
+       __u8  padding;
+};
+
+struct kvm_dtable {
+       __u64 base;
+       __u16 limit;
+       __u16 padding[3];
+};
+
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+       /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
+       struct kvm_segment cs, ds, es, fs, gs, ss;
+       struct kvm_segment tr, ldt;
+       struct kvm_dtable gdt, idt;
+       __u64 cr0, cr2, cr3, cr4, cr8;
+       __u64 efer;
+       __u64 apic_base;
+       __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+       __u8  fpr[8][16];
+       __u16 fcw;
+       __u16 fsw;
+       __u8  ftwx;  /* in fxsave format */
+       __u8  pad1;
+       __u16 last_opcode;
+       __u64 last_ip;
+       __u64 last_dp;
+       __u8  xmm[16][16];
+       __u32 mxcsr;
+       __u32 pad2;
+};
+
+struct kvm_msr_entry {
+       __u32 index;
+       __u32 reserved;
+       __u64 data;
+};
+
+/* for KVM_GET_MSRS and KVM_SET_MSRS */
+struct kvm_msrs {
+       __u32 nmsrs; /* number of msrs in entries */
+       __u32 pad;
+
+       struct kvm_msr_entry entries[0];
+};
+
+/* for KVM_GET_MSR_INDEX_LIST */
+struct kvm_msr_list {
+       __u32 nmsrs; /* number of msrs in entries */
+       __u32 indices[0];
+};
+
+
+struct kvm_cpuid_entry {
+       __u32 function;
+       __u32 eax;
+       __u32 ebx;
+       __u32 ecx;
+       __u32 edx;
+       __u32 padding;
+};
+
+/* for KVM_SET_CPUID */
+struct kvm_cpuid {
+       __u32 nent;
+       __u32 padding;
+       struct kvm_cpuid_entry entries[0];
+};
+
+struct kvm_cpuid_entry2 {
+       __u32 function;
+       __u32 index;
+       __u32 flags;
+       __u32 eax;
+       __u32 ebx;
+       __u32 ecx;
+       __u32 edx;
+       __u32 padding[3];
+};
+
+#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX                (1 << 0)
+#define KVM_CPUID_FLAG_STATEFUL_FUNC           (1 << 1)
+#define KVM_CPUID_FLAG_STATE_READ_NEXT         (1 << 2)
+
+/* for KVM_SET_CPUID2 */
+struct kvm_cpuid2 {
+       __u32 nent;
+       __u32 padding;
+       struct kvm_cpuid_entry2 entries[0];
+};
+
+/* for KVM_GET_PIT and KVM_SET_PIT */
+struct kvm_pit_channel_state {
+       __u32 count; /* can be 65536 */
+       __u16 latched_count;
+       __u8 count_latched;
+       __u8 status_latched;
+       __u8 status;
+       __u8 read_state;
+       __u8 write_state;
+       __u8 write_latch;
+       __u8 rw_mode;
+       __u8 mode;
+       __u8 bcd;
+       __u8 gate;
+       __s64 count_load_time;
+};
+
+struct kvm_debug_exit_arch {
+       __u32 exception;
+       __u32 pad;
+       __u64 pc;
+       __u64 dr6;
+       __u64 dr7;
+};
+
+#define KVM_GUESTDBG_USE_SW_BP         0x00010000
+#define KVM_GUESTDBG_USE_HW_BP         0x00020000
+#define KVM_GUESTDBG_INJECT_DB         0x00040000
+#define KVM_GUESTDBG_INJECT_BP         0x00080000
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+       __u64 debugreg[8];
+};
+
+struct kvm_pit_state {
+       struct kvm_pit_channel_state channels[3];
+};
+
+#define KVM_PIT_FLAGS_HPET_LEGACY  0x00000001
+
+struct kvm_pit_state2 {
+       struct kvm_pit_channel_state channels[3];
+       __u32 flags;
+       __u32 reserved[9];
+};
+
+struct kvm_reinject_control {
+       __u8 pit_reinject;
+       __u8 reserved[31];
+};
+
+/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_VALID_NMI_PENDING        0x00000001
+#define KVM_VCPUEVENT_VALID_SIPI_VECTOR        0x00000002
+#define KVM_VCPUEVENT_VALID_SHADOW     0x00000004
+#define KVM_VCPUEVENT_VALID_SMM                0x00000008
+
+/* Interrupt shadow states */
+#define KVM_X86_SHADOW_INT_MOV_SS      0x01
+#define KVM_X86_SHADOW_INT_STI         0x02
+
+/* for KVM_GET/SET_VCPU_EVENTS */
+struct kvm_vcpu_events {
+       struct {
+               __u8 injected;
+               __u8 nr;
+               __u8 has_error_code;
+               __u8 pad;
+               __u32 error_code;
+       } exception;
+       struct {
+               __u8 injected;
+               __u8 nr;
+               __u8 soft;
+               __u8 shadow;
+       } interrupt;
+       struct {
+               __u8 injected;
+               __u8 pending;
+               __u8 masked;
+               __u8 pad;
+       } nmi;
+       __u32 sipi_vector;
+       __u32 flags;
+       struct {
+               __u8 smm;
+               __u8 pending;
+               __u8 smm_inside_nmi;
+               __u8 latched_init;
+       } smi;
+       __u32 reserved[9];
+};
+
+/* for KVM_GET/SET_DEBUGREGS */
+struct kvm_debugregs {
+       __u64 db[4];
+       __u64 dr6;
+       __u64 dr7;
+       __u64 flags;
+       __u64 reserved[9];
+};
+
+/* for KVM_CAP_XSAVE */
+struct kvm_xsave {
+       __u32 region[1024];
+};
+
+#define KVM_MAX_XCRS   16
+
+struct kvm_xcr {
+       __u32 xcr;
+       __u32 reserved;
+       __u64 value;
+};
+
+struct kvm_xcrs {
+       __u32 nr_xcrs;
+       __u32 flags;
+       struct kvm_xcr xcrs[KVM_MAX_XCRS];
+       __u64 padding[16];
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+#define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
+
+#endif /* _ASM_X86_KVM_H */
diff --git a/tools/arch/x86/include/uapi/asm/kvm_perf.h b/tools/arch/x86/include/uapi/asm/kvm_perf.h
new file mode 100644 (file)
index 0000000..3bb964f
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _ASM_X86_KVM_PERF_H
+#define _ASM_X86_KVM_PERF_H
+
+#include <asm/svm.h>
+#include <asm/vmx.h>
+#include <asm/kvm.h>
+
+#define DECODE_STR_LEN 20
+
+#define VCPU_ID "vcpu_id"
+
+#define KVM_ENTRY_TRACE "kvm:kvm_entry"
+#define KVM_EXIT_TRACE "kvm:kvm_exit"
+#define KVM_EXIT_REASON "exit_reason"
+
+#endif /* _ASM_X86_KVM_PERF_H */
diff --git a/tools/arch/x86/include/uapi/asm/perf_regs.h b/tools/arch/x86/include/uapi/asm/perf_regs.h
new file mode 100644 (file)
index 0000000..3f2207b
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _ASM_X86_PERF_REGS_H
+#define _ASM_X86_PERF_REGS_H
+
+enum perf_event_x86_regs {
+       PERF_REG_X86_AX,
+       PERF_REG_X86_BX,
+       PERF_REG_X86_CX,
+       PERF_REG_X86_DX,
+       PERF_REG_X86_SI,
+       PERF_REG_X86_DI,
+       PERF_REG_X86_BP,
+       PERF_REG_X86_SP,
+       PERF_REG_X86_IP,
+       PERF_REG_X86_FLAGS,
+       PERF_REG_X86_CS,
+       PERF_REG_X86_SS,
+       PERF_REG_X86_DS,
+       PERF_REG_X86_ES,
+       PERF_REG_X86_FS,
+       PERF_REG_X86_GS,
+       PERF_REG_X86_R8,
+       PERF_REG_X86_R9,
+       PERF_REG_X86_R10,
+       PERF_REG_X86_R11,
+       PERF_REG_X86_R12,
+       PERF_REG_X86_R13,
+       PERF_REG_X86_R14,
+       PERF_REG_X86_R15,
+
+       PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
+       PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
+};
+#endif /* _ASM_X86_PERF_REGS_H */
diff --git a/tools/arch/x86/include/uapi/asm/svm.h b/tools/arch/x86/include/uapi/asm/svm.h
new file mode 100644 (file)
index 0000000..3725e14
--- /dev/null
@@ -0,0 +1,178 @@
+#ifndef _UAPI__SVM_H
+#define _UAPI__SVM_H
+
+#define SVM_EXIT_READ_CR0      0x000
+#define SVM_EXIT_READ_CR2      0x002
+#define SVM_EXIT_READ_CR3      0x003
+#define SVM_EXIT_READ_CR4      0x004
+#define SVM_EXIT_READ_CR8      0x008
+#define SVM_EXIT_WRITE_CR0     0x010
+#define SVM_EXIT_WRITE_CR2     0x012
+#define SVM_EXIT_WRITE_CR3     0x013
+#define SVM_EXIT_WRITE_CR4     0x014
+#define SVM_EXIT_WRITE_CR8     0x018
+#define SVM_EXIT_READ_DR0      0x020
+#define SVM_EXIT_READ_DR1      0x021
+#define SVM_EXIT_READ_DR2      0x022
+#define SVM_EXIT_READ_DR3      0x023
+#define SVM_EXIT_READ_DR4      0x024
+#define SVM_EXIT_READ_DR5      0x025
+#define SVM_EXIT_READ_DR6      0x026
+#define SVM_EXIT_READ_DR7      0x027
+#define SVM_EXIT_WRITE_DR0     0x030
+#define SVM_EXIT_WRITE_DR1     0x031
+#define SVM_EXIT_WRITE_DR2     0x032
+#define SVM_EXIT_WRITE_DR3     0x033
+#define SVM_EXIT_WRITE_DR4     0x034
+#define SVM_EXIT_WRITE_DR5     0x035
+#define SVM_EXIT_WRITE_DR6     0x036
+#define SVM_EXIT_WRITE_DR7     0x037
+#define SVM_EXIT_EXCP_BASE     0x040
+#define SVM_EXIT_INTR          0x060
+#define SVM_EXIT_NMI           0x061
+#define SVM_EXIT_SMI           0x062
+#define SVM_EXIT_INIT          0x063
+#define SVM_EXIT_VINTR         0x064
+#define SVM_EXIT_CR0_SEL_WRITE 0x065
+#define SVM_EXIT_IDTR_READ     0x066
+#define SVM_EXIT_GDTR_READ     0x067
+#define SVM_EXIT_LDTR_READ     0x068
+#define SVM_EXIT_TR_READ       0x069
+#define SVM_EXIT_IDTR_WRITE    0x06a
+#define SVM_EXIT_GDTR_WRITE    0x06b
+#define SVM_EXIT_LDTR_WRITE    0x06c
+#define SVM_EXIT_TR_WRITE      0x06d
+#define SVM_EXIT_RDTSC         0x06e
+#define SVM_EXIT_RDPMC         0x06f
+#define SVM_EXIT_PUSHF         0x070
+#define SVM_EXIT_POPF          0x071
+#define SVM_EXIT_CPUID         0x072
+#define SVM_EXIT_RSM           0x073
+#define SVM_EXIT_IRET          0x074
+#define SVM_EXIT_SWINT         0x075
+#define SVM_EXIT_INVD          0x076
+#define SVM_EXIT_PAUSE         0x077
+#define SVM_EXIT_HLT           0x078
+#define SVM_EXIT_INVLPG        0x079
+#define SVM_EXIT_INVLPGA       0x07a
+#define SVM_EXIT_IOIO          0x07b
+#define SVM_EXIT_MSR           0x07c
+#define SVM_EXIT_TASK_SWITCH   0x07d
+#define SVM_EXIT_FERR_FREEZE   0x07e
+#define SVM_EXIT_SHUTDOWN      0x07f
+#define SVM_EXIT_VMRUN         0x080
+#define SVM_EXIT_VMMCALL       0x081
+#define SVM_EXIT_VMLOAD        0x082
+#define SVM_EXIT_VMSAVE        0x083
+#define SVM_EXIT_STGI          0x084
+#define SVM_EXIT_CLGI          0x085
+#define SVM_EXIT_SKINIT        0x086
+#define SVM_EXIT_RDTSCP        0x087
+#define SVM_EXIT_ICEBP         0x088
+#define SVM_EXIT_WBINVD        0x089
+#define SVM_EXIT_MONITOR       0x08a
+#define SVM_EXIT_MWAIT         0x08b
+#define SVM_EXIT_MWAIT_COND    0x08c
+#define SVM_EXIT_XSETBV        0x08d
+#define SVM_EXIT_NPF           0x400
+#define SVM_EXIT_AVIC_INCOMPLETE_IPI           0x401
+#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS     0x402
+
+#define SVM_EXIT_ERR           -1
+
+#define SVM_EXIT_REASONS \
+       { SVM_EXIT_READ_CR0,    "read_cr0" }, \
+       { SVM_EXIT_READ_CR2,    "read_cr2" }, \
+       { SVM_EXIT_READ_CR3,    "read_cr3" }, \
+       { SVM_EXIT_READ_CR4,    "read_cr4" }, \
+       { SVM_EXIT_READ_CR8,    "read_cr8" }, \
+       { SVM_EXIT_WRITE_CR0,   "write_cr0" }, \
+       { SVM_EXIT_WRITE_CR2,   "write_cr2" }, \
+       { SVM_EXIT_WRITE_CR3,   "write_cr3" }, \
+       { SVM_EXIT_WRITE_CR4,   "write_cr4" }, \
+       { SVM_EXIT_WRITE_CR8,   "write_cr8" }, \
+       { SVM_EXIT_READ_DR0,    "read_dr0" }, \
+       { SVM_EXIT_READ_DR1,    "read_dr1" }, \
+       { SVM_EXIT_READ_DR2,    "read_dr2" }, \
+       { SVM_EXIT_READ_DR3,    "read_dr3" }, \
+       { SVM_EXIT_READ_DR4,    "read_dr4" }, \
+       { SVM_EXIT_READ_DR5,    "read_dr5" }, \
+       { SVM_EXIT_READ_DR6,    "read_dr6" }, \
+       { SVM_EXIT_READ_DR7,    "read_dr7" }, \
+       { SVM_EXIT_WRITE_DR0,   "write_dr0" }, \
+       { SVM_EXIT_WRITE_DR1,   "write_dr1" }, \
+       { SVM_EXIT_WRITE_DR2,   "write_dr2" }, \
+       { SVM_EXIT_WRITE_DR3,   "write_dr3" }, \
+       { SVM_EXIT_WRITE_DR4,   "write_dr4" }, \
+       { SVM_EXIT_WRITE_DR5,   "write_dr5" }, \
+       { SVM_EXIT_WRITE_DR6,   "write_dr6" }, \
+       { SVM_EXIT_WRITE_DR7,   "write_dr7" }, \
+       { SVM_EXIT_EXCP_BASE + DE_VECTOR,       "DE excp" }, \
+       { SVM_EXIT_EXCP_BASE + DB_VECTOR,       "DB excp" }, \
+       { SVM_EXIT_EXCP_BASE + BP_VECTOR,       "BP excp" }, \
+       { SVM_EXIT_EXCP_BASE + OF_VECTOR,       "OF excp" }, \
+       { SVM_EXIT_EXCP_BASE + BR_VECTOR,       "BR excp" }, \
+       { SVM_EXIT_EXCP_BASE + UD_VECTOR,       "UD excp" }, \
+       { SVM_EXIT_EXCP_BASE + NM_VECTOR,       "NM excp" }, \
+       { SVM_EXIT_EXCP_BASE + DF_VECTOR,       "DF excp" }, \
+       { SVM_EXIT_EXCP_BASE + TS_VECTOR,       "TS excp" }, \
+       { SVM_EXIT_EXCP_BASE + NP_VECTOR,       "NP excp" }, \
+       { SVM_EXIT_EXCP_BASE + SS_VECTOR,       "SS excp" }, \
+       { SVM_EXIT_EXCP_BASE + GP_VECTOR,       "GP excp" }, \
+       { SVM_EXIT_EXCP_BASE + PF_VECTOR,       "PF excp" }, \
+       { SVM_EXIT_EXCP_BASE + MF_VECTOR,       "MF excp" }, \
+       { SVM_EXIT_EXCP_BASE + AC_VECTOR,       "AC excp" }, \
+       { SVM_EXIT_EXCP_BASE + MC_VECTOR,       "MC excp" }, \
+       { SVM_EXIT_EXCP_BASE + XM_VECTOR,       "XF excp" }, \
+       { SVM_EXIT_INTR,        "interrupt" }, \
+       { SVM_EXIT_NMI,         "nmi" }, \
+       { SVM_EXIT_SMI,         "smi" }, \
+       { SVM_EXIT_INIT,        "init" }, \
+       { SVM_EXIT_VINTR,       "vintr" }, \
+       { SVM_EXIT_CR0_SEL_WRITE, "cr0_sel_write" }, \
+       { SVM_EXIT_IDTR_READ,   "read_idtr" }, \
+       { SVM_EXIT_GDTR_READ,   "read_gdtr" }, \
+       { SVM_EXIT_LDTR_READ,   "read_ldtr" }, \
+       { SVM_EXIT_TR_READ,     "read_rt" }, \
+       { SVM_EXIT_IDTR_WRITE,  "write_idtr" }, \
+       { SVM_EXIT_GDTR_WRITE,  "write_gdtr" }, \
+       { SVM_EXIT_LDTR_WRITE,  "write_ldtr" }, \
+       { SVM_EXIT_TR_WRITE,    "write_rt" }, \
+       { SVM_EXIT_RDTSC,       "rdtsc" }, \
+       { SVM_EXIT_RDPMC,       "rdpmc" }, \
+       { SVM_EXIT_PUSHF,       "pushf" }, \
+       { SVM_EXIT_POPF,        "popf" }, \
+       { SVM_EXIT_CPUID,       "cpuid" }, \
+       { SVM_EXIT_RSM,         "rsm" }, \
+       { SVM_EXIT_IRET,        "iret" }, \
+       { SVM_EXIT_SWINT,       "swint" }, \
+       { SVM_EXIT_INVD,        "invd" }, \
+       { SVM_EXIT_PAUSE,       "pause" }, \
+       { SVM_EXIT_HLT,         "hlt" }, \
+       { SVM_EXIT_INVLPG,      "invlpg" }, \
+       { SVM_EXIT_INVLPGA,     "invlpga" }, \
+       { SVM_EXIT_IOIO,        "io" }, \
+       { SVM_EXIT_MSR,         "msr" }, \
+       { SVM_EXIT_TASK_SWITCH, "task_switch" }, \
+       { SVM_EXIT_FERR_FREEZE, "ferr_freeze" }, \
+       { SVM_EXIT_SHUTDOWN,    "shutdown" }, \
+       { SVM_EXIT_VMRUN,       "vmrun" }, \
+       { SVM_EXIT_VMMCALL,     "hypercall" }, \
+       { SVM_EXIT_VMLOAD,      "vmload" }, \
+       { SVM_EXIT_VMSAVE,      "vmsave" }, \
+       { SVM_EXIT_STGI,        "stgi" }, \
+       { SVM_EXIT_CLGI,        "clgi" }, \
+       { SVM_EXIT_SKINIT,      "skinit" }, \
+       { SVM_EXIT_RDTSCP,      "rdtscp" }, \
+       { SVM_EXIT_ICEBP,       "icebp" }, \
+       { SVM_EXIT_WBINVD,      "wbinvd" }, \
+       { SVM_EXIT_MONITOR,     "monitor" }, \
+       { SVM_EXIT_MWAIT,       "mwait" }, \
+       { SVM_EXIT_XSETBV,      "xsetbv" }, \
+       { SVM_EXIT_NPF,         "npf" }, \
+       { SVM_EXIT_AVIC_INCOMPLETE_IPI,         "avic_incomplete_ipi" }, \
+       { SVM_EXIT_AVIC_UNACCELERATED_ACCESS,   "avic_unaccelerated_access" }, \
+       { SVM_EXIT_ERR,         "invalid_guest_state" }
+
+
+#endif /* _UAPI__SVM_H */
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
new file mode 100644 (file)
index 0000000..5b15d94
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * vmx.h: VMX Architecture related definitions
+ * Copyright (c) 2004, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ * A few random additions are:
+ * Copyright (C) 2006 Qumranet
+ *    Avi Kivity <avi@qumranet.com>
+ *    Yaniv Kamay <yaniv@qumranet.com>
+ *
+ */
+#ifndef _UAPIVMX_H
+#define _UAPIVMX_H
+
+
+#define VMX_EXIT_REASONS_FAILED_VMENTRY         0x80000000
+
+#define EXIT_REASON_EXCEPTION_NMI       0
+#define EXIT_REASON_EXTERNAL_INTERRUPT  1
+#define EXIT_REASON_TRIPLE_FAULT        2
+
+#define EXIT_REASON_PENDING_INTERRUPT   7
+#define EXIT_REASON_NMI_WINDOW          8
+#define EXIT_REASON_TASK_SWITCH         9
+#define EXIT_REASON_CPUID               10
+#define EXIT_REASON_HLT                 12
+#define EXIT_REASON_INVD                13
+#define EXIT_REASON_INVLPG              14
+#define EXIT_REASON_RDPMC               15
+#define EXIT_REASON_RDTSC               16
+#define EXIT_REASON_VMCALL              18
+#define EXIT_REASON_VMCLEAR             19
+#define EXIT_REASON_VMLAUNCH            20
+#define EXIT_REASON_VMPTRLD             21
+#define EXIT_REASON_VMPTRST             22
+#define EXIT_REASON_VMREAD              23
+#define EXIT_REASON_VMRESUME            24
+#define EXIT_REASON_VMWRITE             25
+#define EXIT_REASON_VMOFF               26
+#define EXIT_REASON_VMON                27
+#define EXIT_REASON_CR_ACCESS           28
+#define EXIT_REASON_DR_ACCESS           29
+#define EXIT_REASON_IO_INSTRUCTION      30
+#define EXIT_REASON_MSR_READ            31
+#define EXIT_REASON_MSR_WRITE           32
+#define EXIT_REASON_INVALID_STATE       33
+#define EXIT_REASON_MSR_LOAD_FAIL       34
+#define EXIT_REASON_MWAIT_INSTRUCTION   36
+#define EXIT_REASON_MONITOR_TRAP_FLAG   37
+#define EXIT_REASON_MONITOR_INSTRUCTION 39
+#define EXIT_REASON_PAUSE_INSTRUCTION   40
+#define EXIT_REASON_MCE_DURING_VMENTRY  41
+#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
+#define EXIT_REASON_APIC_ACCESS         44
+#define EXIT_REASON_EOI_INDUCED         45
+#define EXIT_REASON_EPT_VIOLATION       48
+#define EXIT_REASON_EPT_MISCONFIG       49
+#define EXIT_REASON_INVEPT              50
+#define EXIT_REASON_RDTSCP              51
+#define EXIT_REASON_PREEMPTION_TIMER    52
+#define EXIT_REASON_INVVPID             53
+#define EXIT_REASON_WBINVD              54
+#define EXIT_REASON_XSETBV              55
+#define EXIT_REASON_APIC_WRITE          56
+#define EXIT_REASON_INVPCID             58
+#define EXIT_REASON_PML_FULL            62
+#define EXIT_REASON_XSAVES              63
+#define EXIT_REASON_XRSTORS             64
+#define EXIT_REASON_PCOMMIT             65
+
+#define VMX_EXIT_REASONS \
+       { EXIT_REASON_EXCEPTION_NMI,         "EXCEPTION_NMI" }, \
+       { EXIT_REASON_EXTERNAL_INTERRUPT,    "EXTERNAL_INTERRUPT" }, \
+       { EXIT_REASON_TRIPLE_FAULT,          "TRIPLE_FAULT" }, \
+       { EXIT_REASON_PENDING_INTERRUPT,     "PENDING_INTERRUPT" }, \
+       { EXIT_REASON_NMI_WINDOW,            "NMI_WINDOW" }, \
+       { EXIT_REASON_TASK_SWITCH,           "TASK_SWITCH" }, \
+       { EXIT_REASON_CPUID,                 "CPUID" }, \
+       { EXIT_REASON_HLT,                   "HLT" }, \
+       { EXIT_REASON_INVLPG,                "INVLPG" }, \
+       { EXIT_REASON_RDPMC,                 "RDPMC" }, \
+       { EXIT_REASON_RDTSC,                 "RDTSC" }, \
+       { EXIT_REASON_VMCALL,                "VMCALL" }, \
+       { EXIT_REASON_VMCLEAR,               "VMCLEAR" }, \
+       { EXIT_REASON_VMLAUNCH,              "VMLAUNCH" }, \
+       { EXIT_REASON_VMPTRLD,               "VMPTRLD" }, \
+       { EXIT_REASON_VMPTRST,               "VMPTRST" }, \
+       { EXIT_REASON_VMREAD,                "VMREAD" }, \
+       { EXIT_REASON_VMRESUME,              "VMRESUME" }, \
+       { EXIT_REASON_VMWRITE,               "VMWRITE" }, \
+       { EXIT_REASON_VMOFF,                 "VMOFF" }, \
+       { EXIT_REASON_VMON,                  "VMON" }, \
+       { EXIT_REASON_CR_ACCESS,             "CR_ACCESS" }, \
+       { EXIT_REASON_DR_ACCESS,             "DR_ACCESS" }, \
+       { EXIT_REASON_IO_INSTRUCTION,        "IO_INSTRUCTION" }, \
+       { EXIT_REASON_MSR_READ,              "MSR_READ" }, \
+       { EXIT_REASON_MSR_WRITE,             "MSR_WRITE" }, \
+       { EXIT_REASON_MWAIT_INSTRUCTION,     "MWAIT_INSTRUCTION" }, \
+       { EXIT_REASON_MONITOR_TRAP_FLAG,     "MONITOR_TRAP_FLAG" }, \
+       { EXIT_REASON_MONITOR_INSTRUCTION,   "MONITOR_INSTRUCTION" }, \
+       { EXIT_REASON_PAUSE_INSTRUCTION,     "PAUSE_INSTRUCTION" }, \
+       { EXIT_REASON_MCE_DURING_VMENTRY,    "MCE_DURING_VMENTRY" }, \
+       { EXIT_REASON_TPR_BELOW_THRESHOLD,   "TPR_BELOW_THRESHOLD" }, \
+       { EXIT_REASON_APIC_ACCESS,           "APIC_ACCESS" }, \
+       { EXIT_REASON_EPT_VIOLATION,         "EPT_VIOLATION" }, \
+       { EXIT_REASON_EPT_MISCONFIG,         "EPT_MISCONFIG" }, \
+       { EXIT_REASON_INVEPT,                "INVEPT" }, \
+       { EXIT_REASON_PREEMPTION_TIMER,      "PREEMPTION_TIMER" }, \
+       { EXIT_REASON_WBINVD,                "WBINVD" }, \
+       { EXIT_REASON_APIC_WRITE,            "APIC_WRITE" }, \
+       { EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
+       { EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
+       { EXIT_REASON_MSR_LOAD_FAIL,         "MSR_LOAD_FAIL" }, \
+       { EXIT_REASON_INVD,                  "INVD" }, \
+       { EXIT_REASON_INVVPID,               "INVVPID" }, \
+       { EXIT_REASON_INVPCID,               "INVPCID" }, \
+       { EXIT_REASON_XSAVES,                "XSAVES" }, \
+       { EXIT_REASON_XRSTORS,               "XRSTORS" }, \
+       { EXIT_REASON_PCOMMIT,               "PCOMMIT" }
+
+#define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
+#define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
+
+#endif /* _UAPIVMX_H */
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
new file mode 100644 (file)
index 0000000..2ec0b0a
--- /dev/null
@@ -0,0 +1,297 @@
+/* Copyright 2002 Andi Kleen */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative-asm.h>
+
+/*
+ * We build a jump to memcpy_orig by default which gets NOPped out on
+ * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
+ * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
+ * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
+ */
+
+.weak memcpy
+
+/*
+ * memcpy - Copy a memory block.
+ *
+ * Input:
+ *  rdi destination
+ *  rsi source
+ *  rdx count
+ *
+ * Output:
+ * rax original destination
+ */
+ENTRY(__memcpy)
+ENTRY(memcpy)
+       ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
+                     "jmp memcpy_erms", X86_FEATURE_ERMS
+
+       movq %rdi, %rax
+       movq %rdx, %rcx
+       shrq $3, %rcx
+       andl $7, %edx
+       rep movsq
+       movl %edx, %ecx
+       rep movsb
+       ret
+ENDPROC(memcpy)
+ENDPROC(__memcpy)
+
+/*
+ * memcpy_erms() - enhanced fast string memcpy. This is faster and
+ * simpler than memcpy. Use memcpy_erms when possible.
+ */
+ENTRY(memcpy_erms)
+       movq %rdi, %rax
+       movq %rdx, %rcx
+       rep movsb
+       ret
+ENDPROC(memcpy_erms)
+
+ENTRY(memcpy_orig)
+       movq %rdi, %rax
+
+       cmpq $0x20, %rdx
+       jb .Lhandle_tail
+
+       /*
+        * We check whether memory false dependence could occur,
+        * then jump to corresponding copy mode.
+        */
+       cmp  %dil, %sil
+       jl .Lcopy_backward
+       subq $0x20, %rdx
+.Lcopy_forward_loop:
+       subq $0x20,     %rdx
+
+       /*
+        * Move in blocks of 4x8 bytes:
+        */
+       movq 0*8(%rsi), %r8
+       movq 1*8(%rsi), %r9
+       movq 2*8(%rsi), %r10
+       movq 3*8(%rsi), %r11
+       leaq 4*8(%rsi), %rsi
+
+       movq %r8,       0*8(%rdi)
+       movq %r9,       1*8(%rdi)
+       movq %r10,      2*8(%rdi)
+       movq %r11,      3*8(%rdi)
+       leaq 4*8(%rdi), %rdi
+       jae  .Lcopy_forward_loop
+       addl $0x20,     %edx
+       jmp  .Lhandle_tail
+
+.Lcopy_backward:
+       /*
+        * Calculate copy position to tail.
+        */
+       addq %rdx,      %rsi
+       addq %rdx,      %rdi
+       subq $0x20,     %rdx
+       /*
+        * At most 3 ALU operations in one cycle,
+        * so append NOPs in the same 16-byte chunk.
+        */
+       .p2align 4
+.Lcopy_backward_loop:
+       subq $0x20,     %rdx
+       movq -1*8(%rsi),        %r8
+       movq -2*8(%rsi),        %r9
+       movq -3*8(%rsi),        %r10
+       movq -4*8(%rsi),        %r11
+       leaq -4*8(%rsi),        %rsi
+       movq %r8,               -1*8(%rdi)
+       movq %r9,               -2*8(%rdi)
+       movq %r10,              -3*8(%rdi)
+       movq %r11,              -4*8(%rdi)
+       leaq -4*8(%rdi),        %rdi
+       jae  .Lcopy_backward_loop
+
+       /*
+        * Calculate copy position to head.
+        */
+       addl $0x20,     %edx
+       subq %rdx,      %rsi
+       subq %rdx,      %rdi
+.Lhandle_tail:
+       cmpl $16,       %edx
+       jb   .Lless_16bytes
+
+       /*
+        * Move data from 16 bytes to 31 bytes.
+        */
+       movq 0*8(%rsi), %r8
+       movq 1*8(%rsi), %r9
+       movq -2*8(%rsi, %rdx),  %r10
+       movq -1*8(%rsi, %rdx),  %r11
+       movq %r8,       0*8(%rdi)
+       movq %r9,       1*8(%rdi)
+       movq %r10,      -2*8(%rdi, %rdx)
+       movq %r11,      -1*8(%rdi, %rdx)
+       retq
+       .p2align 4
+.Lless_16bytes:
+       cmpl $8,        %edx
+       jb   .Lless_8bytes
+       /*
+        * Move data from 8 bytes to 15 bytes.
+        */
+       movq 0*8(%rsi), %r8
+       movq -1*8(%rsi, %rdx),  %r9
+       movq %r8,       0*8(%rdi)
+       movq %r9,       -1*8(%rdi, %rdx)
+       retq
+       .p2align 4
+.Lless_8bytes:
+       cmpl $4,        %edx
+       jb   .Lless_3bytes
+
+       /*
+        * Move data from 4 bytes to 7 bytes.
+        */
+       movl (%rsi), %ecx
+       movl -4(%rsi, %rdx), %r8d
+       movl %ecx, (%rdi)
+       movl %r8d, -4(%rdi, %rdx)
+       retq
+       .p2align 4
+.Lless_3bytes:
+       subl $1, %edx
+       jb .Lend
+       /*
+        * Move data from 1 bytes to 3 bytes.
+        */
+       movzbl (%rsi), %ecx
+       jz .Lstore_1byte
+       movzbq 1(%rsi), %r8
+       movzbq (%rsi, %rdx), %r9
+       movb %r8b, 1(%rdi)
+       movb %r9b, (%rdi, %rdx)
+.Lstore_1byte:
+       movb %cl, (%rdi)
+
+.Lend:
+       retq
+ENDPROC(memcpy_orig)
+
+#ifndef CONFIG_UML
+/*
+ * memcpy_mcsafe - memory copy with machine check exception handling
+ * Note that we only catch machine checks when reading the source addresses.
+ * Writes to target are posted and don't generate machine checks.
+ */
+ENTRY(memcpy_mcsafe)
+       cmpl $8, %edx
+       /* Less than 8 bytes? Go to byte copy loop */
+       jb .L_no_whole_words
+
+       /* Check for bad alignment of source */
+       testl $7, %esi
+       /* Already aligned */
+       jz .L_8byte_aligned
+
+       /* Copy one byte at a time until source is 8-byte aligned */
+       movl %esi, %ecx
+       andl $7, %ecx
+       subl $8, %ecx
+       negl %ecx
+       subl %ecx, %edx
+.L_copy_leading_bytes:
+       movb (%rsi), %al
+       movb %al, (%rdi)
+       incq %rsi
+       incq %rdi
+       decl %ecx
+       jnz .L_copy_leading_bytes
+
+.L_8byte_aligned:
+       /* Figure out how many whole cache lines (64-bytes) to copy */
+       movl %edx, %ecx
+       andl $63, %edx
+       shrl $6, %ecx
+       jz .L_no_whole_cache_lines
+
+       /* Loop copying whole cache lines */
+.L_cache_w0: movq (%rsi), %r8
+.L_cache_w1: movq 1*8(%rsi), %r9
+.L_cache_w2: movq 2*8(%rsi), %r10
+.L_cache_w3: movq 3*8(%rsi), %r11
+       movq %r8, (%rdi)
+       movq %r9, 1*8(%rdi)
+       movq %r10, 2*8(%rdi)
+       movq %r11, 3*8(%rdi)
+.L_cache_w4: movq 4*8(%rsi), %r8
+.L_cache_w5: movq 5*8(%rsi), %r9
+.L_cache_w6: movq 6*8(%rsi), %r10
+.L_cache_w7: movq 7*8(%rsi), %r11
+       movq %r8, 4*8(%rdi)
+       movq %r9, 5*8(%rdi)
+       movq %r10, 6*8(%rdi)
+       movq %r11, 7*8(%rdi)
+       leaq 64(%rsi), %rsi
+       leaq 64(%rdi), %rdi
+       decl %ecx
+       jnz .L_cache_w0
+
+       /* Are there any trailing 8-byte words? */
+.L_no_whole_cache_lines:
+       movl %edx, %ecx
+       andl $7, %edx
+       shrl $3, %ecx
+       jz .L_no_whole_words
+
+       /* Copy trailing words */
+.L_copy_trailing_words:
+       movq (%rsi), %r8
+       mov %r8, (%rdi)
+       leaq 8(%rsi), %rsi
+       leaq 8(%rdi), %rdi
+       decl %ecx
+       jnz .L_copy_trailing_words
+
+       /* Any trailing bytes? */
+.L_no_whole_words:
+       andl %edx, %edx
+       jz .L_done_memcpy_trap
+
+       /* Copy trailing bytes */
+       movl %edx, %ecx
+.L_copy_trailing_bytes:
+       movb (%rsi), %al
+       movb %al, (%rdi)
+       incq %rsi
+       incq %rdi
+       decl %ecx
+       jnz .L_copy_trailing_bytes
+
+       /* Copy successful. Return zero */
+.L_done_memcpy_trap:
+       xorq %rax, %rax
+       ret
+ENDPROC(memcpy_mcsafe)
+
+       .section .fixup, "ax"
+       /* Return -EFAULT for any failure */
+.L_memcpy_mcsafe_fail:
+       mov     $-EFAULT, %rax
+       ret
+
+       .previous
+
+       _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
+       _ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)
+#endif
diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S
new file mode 100644 (file)
index 0000000..e1229ec
--- /dev/null
@@ -0,0 +1,138 @@
+/* Copyright 2002 Andi Kleen, SuSE Labs */
+
+#include <linux/linkage.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative-asm.h>
+
+.weak memset
+
+/*
+ * ISO C memset - set a memory block to a byte value. This function uses fast
+ * string to get better performance than the original function. The code is
+ * simpler and shorter than the original function as well.
+ *
+ * rdi   destination
+ * rsi   value (char)
+ * rdx   count (bytes)
+ *
+ * rax   original destination
+ */
+ENTRY(memset)
+ENTRY(__memset)
+       /*
+        * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
+        * to use it when possible. If not available, use fast string instructions.
+        *
+        * Otherwise, use original memset function.
+        */
+       ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
+                     "jmp memset_erms", X86_FEATURE_ERMS
+
+       movq %rdi,%r9
+       movq %rdx,%rcx
+       andl $7,%edx
+       shrq $3,%rcx
+       /* expand byte value  */
+       movzbl %sil,%esi
+       movabs $0x0101010101010101,%rax
+       imulq %rsi,%rax
+       rep stosq
+       movl %edx,%ecx
+       rep stosb
+       movq %r9,%rax
+       ret
+ENDPROC(memset)
+ENDPROC(__memset)
+
+/*
+ * ISO C memset - set a memory block to a byte value. This function uses
+ * enhanced rep stosb to override the fast string function.
+ * The code is simpler and shorter than the fast string function as well.
+ *
+ * rdi   destination
+ * rsi   value (char)
+ * rdx   count (bytes)
+ *
+ * rax   original destination
+ */
+ENTRY(memset_erms)
+       movq %rdi,%r9
+       movb %sil,%al
+       movq %rdx,%rcx
+       rep stosb
+       movq %r9,%rax
+       ret
+ENDPROC(memset_erms)
+
+ENTRY(memset_orig)
+       movq %rdi,%r10
+
+       /* expand byte value  */
+       movzbl %sil,%ecx
+       movabs $0x0101010101010101,%rax
+       imulq  %rcx,%rax
+
+       /* align dst */
+       movl  %edi,%r9d
+       andl  $7,%r9d
+       jnz  .Lbad_alignment
+.Lafter_bad_alignment:
+
+       movq  %rdx,%rcx
+       shrq  $6,%rcx
+       jz       .Lhandle_tail
+
+       .p2align 4
+.Lloop_64:
+       decq  %rcx
+       movq  %rax,(%rdi)
+       movq  %rax,8(%rdi)
+       movq  %rax,16(%rdi)
+       movq  %rax,24(%rdi)
+       movq  %rax,32(%rdi)
+       movq  %rax,40(%rdi)
+       movq  %rax,48(%rdi)
+       movq  %rax,56(%rdi)
+       leaq  64(%rdi),%rdi
+       jnz    .Lloop_64
+
+       /* Handle tail in loops. The loops should be faster than hard
+          to predict jump tables. */
+       .p2align 4
+.Lhandle_tail:
+       movl    %edx,%ecx
+       andl    $63&(~7),%ecx
+       jz              .Lhandle_7
+       shrl    $3,%ecx
+       .p2align 4
+.Lloop_8:
+       decl   %ecx
+       movq  %rax,(%rdi)
+       leaq  8(%rdi),%rdi
+       jnz    .Lloop_8
+
+.Lhandle_7:
+       andl    $7,%edx
+       jz      .Lende
+       .p2align 4
+.Lloop_1:
+       decl    %edx
+       movb    %al,(%rdi)
+       leaq    1(%rdi),%rdi
+       jnz     .Lloop_1
+
+.Lende:
+       movq    %r10,%rax
+       ret
+
+.Lbad_alignment:
+       cmpq $7,%rdx
+       jbe     .Lhandle_7
+       movq %rax,(%rdi)        /* unaligned store */
+       movq $8,%r8
+       subq %r9,%r8
+       addq %r8,%rdi
+       subq %r8,%rdx
+       jmp .Lafter_bad_alignment
+.Lfinal:
+ENDPROC(memset_orig)
index 57c8f98874e833b7407c7000fe2fb8ff1253eb16..a120c6b755a9869798a8dbfd5c90919c52f02eac 100644 (file)
@@ -40,6 +40,8 @@ FEATURE_TESTS_BASIC :=                        \
        libbfd                          \
        libelf                          \
        libelf-getphdrnum               \
+       libelf-gelf_getnote             \
+       libelf-getshdrstrndx            \
        libelf-mmap                     \
        libnuma                         \
        numa_num_possible_cpus          \
@@ -60,7 +62,8 @@ FEATURE_TESTS_BASIC :=                        \
        zlib                            \
        lzma                            \
        get_cpuid                       \
-       bpf
+       bpf                             \
+       sdt
 
 # FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
 # of all feature tests
index 3d88f09e188bdd3e01f7affc2094aeae2e6b1863..a0b29a31181611902b13465f989afb6aebfab983 100644 (file)
@@ -17,6 +17,8 @@ FILES=                                        \
        test-cplus-demangle.bin         \
        test-libelf.bin                 \
        test-libelf-getphdrnum.bin      \
+       test-libelf-gelf_getnote.bin    \
+       test-libelf-getshdrstrndx.bin   \
        test-libelf-mmap.bin            \
        test-libnuma.bin                \
        test-numa_num_possible_cpus.bin \
@@ -43,7 +45,8 @@ FILES=                                        \
        test-zlib.bin                   \
        test-lzma.bin                   \
        test-bpf.bin                    \
-       test-get_cpuid.bin
+       test-get_cpuid.bin              \
+       test-sdt.bin
 
 FILES := $(addprefix $(OUTPUT),$(FILES))
 
@@ -98,6 +101,12 @@ $(OUTPUT)test-libelf-mmap.bin:
 $(OUTPUT)test-libelf-getphdrnum.bin:
        $(BUILD) -lelf
 
+$(OUTPUT)test-libelf-gelf_getnote.bin:
+       $(BUILD) -lelf
+
+$(OUTPUT)test-libelf-getshdrstrndx.bin:
+       $(BUILD) -lelf
+
 $(OUTPUT)test-libnuma.bin:
        $(BUILD) -lnuma
 
@@ -205,6 +214,9 @@ $(OUTPUT)test-get_cpuid.bin:
 $(OUTPUT)test-bpf.bin:
        $(BUILD)
 
+$(OUTPUT)test-sdt.bin:
+       $(BUILD)
+
 -include $(OUTPUT)*.d
 
 ###############################
index a282e8cb84f308da358983ebccbf80c612e7d061..699e43627397b5c3008cd74008016f4812550c77 100644 (file)
 # include "test-libelf-getphdrnum.c"
 #undef main
 
+#define main main_test_libelf_gelf_getnote
+# include "test-libelf-gelf_getnote.c"
+#undef main
+
+#define main main_test_libelf_getshdrstrndx
+# include "test-libelf-getshdrstrndx.c"
+#undef main
+
 #define main main_test_libunwind
 # include "test-libunwind.c"
 #undef main
 # include "test-libcrypto.c"
 #undef main
 
+#define main main_test_sdt
+# include "test-sdt.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
        main_test_libpython();
@@ -149,6 +161,8 @@ int main(int argc, char *argv[])
        main_test_dwarf();
        main_test_dwarf_getlocations();
        main_test_libelf_getphdrnum();
+       main_test_libelf_gelf_getnote();
+       main_test_libelf_getshdrstrndx();
        main_test_libunwind();
        main_test_libaudit();
        main_test_libslang();
@@ -168,6 +182,7 @@ int main(int argc, char *argv[])
        main_test_get_cpuid();
        main_test_bpf();
        main_test_libcrypto();
+       main_test_sdt();
 
        return 0;
 }
diff --git a/tools/build/feature/test-libelf-gelf_getnote.c b/tools/build/feature/test-libelf-gelf_getnote.c
new file mode 100644 (file)
index 0000000..d78cf4d
--- /dev/null
@@ -0,0 +1,7 @@
+#include <stdlib.h>
+#include <gelf.h>
+
+int main(void)
+{
+       return gelf_getnote(NULL, 0, NULL, NULL, NULL);
+}
diff --git a/tools/build/feature/test-libelf-getshdrstrndx.c b/tools/build/feature/test-libelf-getshdrstrndx.c
new file mode 100644 (file)
index 0000000..f0c3b47
--- /dev/null
@@ -0,0 +1,8 @@
+#include <libelf.h>
+
+int main(void)
+{
+       size_t dst;
+
+       return elf_getshdrstrndx(0, &dst);
+}
diff --git a/tools/build/feature/test-sdt.c b/tools/build/feature/test-sdt.c
new file mode 100644 (file)
index 0000000..e4531a6
--- /dev/null
@@ -0,0 +1,7 @@
+#include <sys/sdt.h>
+
+int main(void)
+{
+       DTRACE_PROBE(provider, name);
+       return 0;
+}
index 3a7a54f59713809c7c8f2cda36461a07d55df191..5446d625e17d63ab977900625581eed7a0de1735 100644 (file)
@@ -1,16 +1,31 @@
 CC = $(CROSS_COMPILE)gcc
 CFLAGS += -Wall -g -D_GNU_SOURCE
 
-all: iio_event_monitor lsiio generic_buffer
+BINDIR=usr/bin
+INSTALL_PROGRAM=install -m 755 -p
+DEL_FILE=rm -f
+
+all: iio_event_monitor lsiio iio_generic_buffer
 
 iio_event_monitor: iio_event_monitor.o iio_utils.o
 
 lsiio: lsiio.o iio_utils.o
 
-generic_buffer: generic_buffer.o iio_utils.o
+iio_generic_buffer: iio_generic_buffer.o iio_utils.o
 
 %.o: %.c iio_utils.h
 
+install:
+       - mkdir -p $(INSTALL_ROOT)/$(BINDIR)
+       - $(INSTALL_PROGRAM) "iio_event_monitor" "$(INSTALL_ROOT)/$(BINDIR)/iio_event_monitor"
+       - $(INSTALL_PROGRAM) "lsiio" "$(INSTALL_ROOT)/$(BINDIR)/lsiio"
+       - $(INSTALL_PROGRAM) "iio_generic_buffer" "$(INSTALL_ROOT)/$(BINDIR)/iio_generic_buffer"
+
+uninstall:
+       $(DEL_FILE) "$(INSTALL_ROOT)/$(BINDIR)/iio_event_monitor"
+       $(DEL_FILE) "$(INSTALL_ROOT)/$(BINDIR)/lsiio"
+       $(DEL_FILE) "$(INSTALL_ROOT)/$(BINDIR)/iio_generic_buffer"
+
 .PHONY: clean
 clean:
-       rm -f *.o iio_event_monitor lsiio generic_buffer
+       rm -f *.o iio_event_monitor lsiio iio_generic_buffer
similarity index 76%
rename from tools/iio/generic_buffer.c
rename to tools/iio/iio_generic_buffer.c
index 2429c78de94061b0867b9281aeb75fd6bea2553a..0e8a1f7a292d13edba366a9ad60b91dd2b47747c 100644 (file)
@@ -32,6 +32,8 @@
 #include <endian.h>
 #include <getopt.h>
 #include <inttypes.h>
+#include <stdbool.h>
+#include <signal.h>
 #include "iio_utils.h"
 
 /**
@@ -249,11 +251,82 @@ void print_usage(void)
                "  -e         Disable wait for event (new data)\n"
                "  -g         Use trigger-less mode\n"
                "  -l <n>     Set buffer length to n samples\n"
-               "  -n <name>  Set device name (mandatory)\n"
-               "  -t <name>  Set trigger name\n"
+               "  --device-name -n <name>\n"
+               "  --device-num -N <num>\n"
+               "        Set device by name or number (mandatory)\n"
+               "  --trigger-name -t <name>\n"
+               "  --trigger-num -T <num>\n"
+               "        Set trigger by name or number\n"
                "  -w <n>     Set delay between reads in us (event-less mode)\n");
 }
 
+enum autochan autochannels = AUTOCHANNELS_DISABLED;
+char *dev_dir_name = NULL;
+char *buf_dir_name = NULL;
+bool current_trigger_set = false;
+
+void cleanup(void)
+{
+       int ret;
+
+       /* Disable trigger */
+       if (dev_dir_name && current_trigger_set) {
+               /* Disconnect the trigger - just write a dummy name. */
+               ret = write_sysfs_string("trigger/current_trigger",
+                                        dev_dir_name, "NULL");
+               if (ret < 0)
+                       fprintf(stderr, "Failed to disable trigger: %s\n",
+                               strerror(-ret));
+               current_trigger_set = false;
+       }
+
+       /* Disable buffer */
+       if (buf_dir_name) {
+               ret = write_sysfs_int("enable", buf_dir_name, 0);
+               if (ret < 0)
+                       fprintf(stderr, "Failed to disable buffer: %s\n",
+                               strerror(-ret));
+       }
+
+       /* Disable channels if auto-enabled */
+       if (dev_dir_name && autochannels == AUTOCHANNELS_ACTIVE) {
+               ret = enable_disable_all_channels(dev_dir_name, 0);
+               if (ret)
+                       fprintf(stderr, "Failed to disable all channels\n");
+               autochannels = AUTOCHANNELS_DISABLED;
+       }
+}
+
+void sig_handler(int signum)
+{
+       fprintf(stderr, "Caught signal %d\n", signum);
+       cleanup();
+       exit(-signum);
+}
+
+void register_cleanup(void)
+{
+       struct sigaction sa = { .sa_handler = sig_handler };
+       const int signums[] = { SIGINT, SIGTERM, SIGABRT };
+       int ret, i;
+
+       for (i = 0; i < ARRAY_SIZE(signums); ++i) {
+               ret = sigaction(signums[i], &sa, NULL);
+               if (ret) {
+                       perror("Failed to register signal handler");
+                       exit(-1);
+               }
+       }
+}
+
+static const struct option longopts[] = {
+       { "device-name",        1, 0, 'n' },
+       { "device-num",         1, 0, 'N' },
+       { "trigger-name",       1, 0, 't' },
+       { "trigger-num",        1, 0, 'T' },
+       { },
+};
+
 int main(int argc, char **argv)
 {
        unsigned long num_loops = 2;
@@ -261,26 +334,25 @@ int main(int argc, char **argv)
        unsigned long buf_len = 128;
 
        int ret, c, i, j, toread;
-       int fp;
+       int fp = -1;
 
-       int num_channels;
+       int num_channels = 0;
        char *trigger_name = NULL, *device_name = NULL;
-       char *dev_dir_name, *buf_dir_name;
 
-       int datardytrigger = 1;
-       char *data;
+       char *data = NULL;
        ssize_t read_size;
-       int dev_num, trig_num;
-       char *buffer_access;
+       int dev_num = -1, trig_num = -1;
+       char *buffer_access = NULL;
        int scan_size;
        int noevents = 0;
        int notrigger = 0;
-       enum autochan autochannels = AUTOCHANNELS_DISABLED;
        char *dummy;
 
        struct iio_channel_info *channels;
 
-       while ((c = getopt(argc, argv, "ac:egl:n:t:w:")) != -1) {
+       register_cleanup();
+
+       while ((c = getopt_long(argc, argv, "ac:egl:n:N:t:T:w:", longopts, NULL)) != -1) {
                switch (c) {
                case 'a':
                        autochannels = AUTOCHANNELS_ENABLED;
@@ -288,8 +360,10 @@ int main(int argc, char **argv)
                case 'c':
                        errno = 0;
                        num_loops = strtoul(optarg, &dummy, 10);
-                       if (errno)
-                               return -errno;
+                       if (errno) {
+                               ret = -errno;
+                               goto error;
+                       }
 
                        break;
                case 'e':
@@ -301,49 +375,102 @@ int main(int argc, char **argv)
                case 'l':
                        errno = 0;
                        buf_len = strtoul(optarg, &dummy, 10);
-                       if (errno)
-                               return -errno;
+                       if (errno) {
+                               ret = -errno;
+                               goto error;
+                       }
 
                        break;
                case 'n':
-                       device_name = optarg;
+                       device_name = strdup(optarg);
+                       break;
+               case 'N':
+                       errno = 0;
+                       dev_num = strtoul(optarg, &dummy, 10);
+                       if (errno) {
+                               ret = -errno;
+                               goto error;
+                       }
                        break;
                case 't':
-                       trigger_name = optarg;
-                       datardytrigger = 0;
+                       trigger_name = strdup(optarg);
                        break;
-               case 'w':
+               case 'T':
                        errno = 0;
-                       timedelay = strtoul(optarg, &dummy, 10);
+                       trig_num = strtoul(optarg, &dummy, 10);
-                       if (errno)
-                               return -errno;
+                       if (errno) {
+                               ret = -errno;
+                               goto error;
+                       }
                        break;
+               case 'w':
+                       errno = 0;
+                       timedelay = strtoul(optarg, &dummy, 10);
+                       if (errno) {
+                               ret = -errno;
+                               goto error;
+                       }
+                       break;
                case '?':
                        print_usage();
-                       return -1;
+                       ret = -1;
+                       goto error;
                }
        }
 
-       if (!device_name) {
-               fprintf(stderr, "Device name not set\n");
-               print_usage();
-               return -1;
-       }
-
        /* Find the device requested */
-       dev_num = find_type_by_name(device_name, "iio:device");
-       if (dev_num < 0) {
-               fprintf(stderr, "Failed to find the %s\n", device_name);
-               return dev_num;
+       if (dev_num < 0 && !device_name) {
+               fprintf(stderr, "Device not set\n");
+               print_usage();
+               ret = -1;
+               goto error;
+       } else if (dev_num >= 0 && device_name) {
+               fprintf(stderr, "Only one of --device-num or --device-name needs to be set\n");
+               print_usage();
+               ret = -1;
+               goto error;
+       } else if (dev_num < 0) {
+               dev_num = find_type_by_name(device_name, "iio:device");
+               if (dev_num < 0) {
+                       fprintf(stderr, "Failed to find the %s\n", device_name);
+                       ret = dev_num;
+                       goto error;
+               }
        }
-
        printf("iio device number being used is %d\n", dev_num);
 
        ret = asprintf(&dev_dir_name, "%siio:device%d", iio_dir, dev_num);
        if (ret < 0)
                return -ENOMEM;
+       /* Fetch device_name if specified by number */
+       if (!device_name) {
+               device_name = malloc(IIO_MAX_NAME_LENGTH);
+               if (!device_name) {
+                       ret = -ENOMEM;
+                       goto error;
+               }
+               ret = read_sysfs_string("name", dev_dir_name, device_name);
+               if (ret < 0) {
+                       fprintf(stderr, "Failed to read name of device %d\n", dev_num);
+                       goto error;
+               }
+       }
 
-       if (!notrigger) {
+       if (notrigger) {
+               printf("trigger-less mode selected\n");
+       } else if (trig_num >= 0) {
+               char *trig_dev_name;
+               ret = asprintf(&trig_dev_name, "%strigger%d", iio_dir, trig_num);
+               if (ret < 0) {
+                       ret = -ENOMEM;
+                       goto error;
+               }
+               trigger_name = malloc(IIO_MAX_NAME_LENGTH);
+               if (!trigger_name) {
+                       free(trig_dev_name);
+                       ret = -ENOMEM;
+                       goto error;
+               }
+               ret = read_sysfs_string("name", trig_dev_name, trigger_name);
+               free(trig_dev_name);
+               if (ret < 0) {
+                       fprintf(stderr, "Failed to read trigger%d name\n", trig_num);
+                       goto error;
+               }
+               printf("iio trigger number being used is %d\n", trig_num);
+       } else {
                if (!trigger_name) {
                        /*
                         * Build the trigger name. If it is device associated
@@ -354,7 +481,7 @@ int main(int argc, char **argv)
                                       "%s-dev%d", device_name, dev_num);
                        if (ret < 0) {
                                ret = -ENOMEM;
-                               goto error_free_dev_dir_name;
+                               goto error;
                        }
                }
 
@@ -367,7 +494,7 @@ int main(int argc, char **argv)
                                       "%s-trigger", device_name);
                        if (ret < 0) {
                                ret = -ENOMEM;
-                               goto error_free_dev_dir_name;
+                               goto error;
                        }
                }
 
@@ -376,12 +503,10 @@ int main(int argc, char **argv)
                        fprintf(stderr, "Failed to find the trigger %s\n",
                                trigger_name);
                        ret = trig_num;
-                       goto error_free_triggername;
+                       goto error;
                }
 
                printf("iio trigger number being used is %d\n", trig_num);
-       } else {
-               printf("trigger-less mode selected\n");
        }
 
        /*
@@ -392,7 +517,7 @@ int main(int argc, char **argv)
        if (ret) {
                fprintf(stderr, "Problem reading scan element information\n"
                        "diag %s\n", dev_dir_name);
-               goto error_free_triggername;
+               goto error;
        }
        if (num_channels && autochannels == AUTOCHANNELS_ENABLED) {
                fprintf(stderr, "Auto-channels selected but some channels "
@@ -407,7 +532,7 @@ int main(int argc, char **argv)
                ret = enable_disable_all_channels(dev_dir_name, 1);
                if (ret) {
                        fprintf(stderr, "Failed to enable all channels\n");
-                       goto error_free_triggername;
+                       goto error;
                }
 
                /* This flags that we need to disable the channels again */
@@ -419,12 +544,12 @@ int main(int argc, char **argv)
                        fprintf(stderr, "Problem reading scan element "
                                "information\n"
                                "diag %s\n", dev_dir_name);
-                       goto error_disable_channels;
+                       goto error;
                }
                if (!num_channels) {
                        fprintf(stderr, "Still no channels after "
                                "auto-enabling, giving up\n");
-                       goto error_disable_channels;
+                       goto error;
                }
        }
 
@@ -436,7 +561,7 @@ int main(int argc, char **argv)
                        "/*_en or pass -a to autoenable channels and "
                        "try again.\n", dev_dir_name);
                ret = -ENOENT;
-               goto error_free_triggername;
+               goto error;
        }
 
        /*
@@ -448,7 +573,7 @@ int main(int argc, char **argv)
                       "%siio:device%d/buffer", iio_dir, dev_num);
        if (ret < 0) {
                ret = -ENOMEM;
-               goto error_free_channels;
+               goto error;
        }
 
        if (!notrigger) {
@@ -463,34 +588,34 @@ int main(int argc, char **argv)
                if (ret < 0) {
                        fprintf(stderr,
                                "Failed to write current_trigger file\n");
-                       goto error_free_buf_dir_name;
+                       goto error;
                }
        }
 
        /* Setup ring buffer parameters */
        ret = write_sysfs_int("length", buf_dir_name, buf_len);
        if (ret < 0)
-               goto error_free_buf_dir_name;
+               goto error;
 
        /* Enable the buffer */
        ret = write_sysfs_int("enable", buf_dir_name, 1);
        if (ret < 0) {
                fprintf(stderr,
                        "Failed to enable buffer: %s\n", strerror(-ret));
-               goto error_free_buf_dir_name;
+               goto error;
        }
 
        scan_size = size_from_channelarray(channels, num_channels);
        data = malloc(scan_size * buf_len);
        if (!data) {
                ret = -ENOMEM;
-               goto error_free_buf_dir_name;
+               goto error;
        }
 
        ret = asprintf(&buffer_access, "/dev/iio:device%d", dev_num);
        if (ret < 0) {
                ret = -ENOMEM;
-               goto error_free_data;
+               goto error;
        }
 
        /* Attempt to open non blocking the access dev */
@@ -498,7 +623,7 @@ int main(int argc, char **argv)
        if (fp == -1) { /* TODO: If it isn't there make the node */
                ret = -errno;
                fprintf(stderr, "Failed to open %s\n", buffer_access);
-               goto error_free_buffer_access;
+               goto error;
        }
 
        for (j = 0; j < num_loops; j++) {
@@ -511,7 +636,7 @@ int main(int argc, char **argv)
                        ret = poll(&pfd, 1, -1);
                        if (ret < 0) {
                                ret = -errno;
-                               goto error_close_buffer_access;
+                               goto error;
                        } else if (ret == 0) {
                                continue;
                        }
@@ -536,45 +661,21 @@ int main(int argc, char **argv)
                                     num_channels);
        }
 
-       /* Stop the buffer */
-       ret = write_sysfs_int("enable", buf_dir_name, 0);
-       if (ret < 0)
-               goto error_close_buffer_access;
+error:
+       cleanup();
 
-       if (!notrigger)
-               /* Disconnect the trigger - just write a dummy name. */
-               ret = write_sysfs_string("trigger/current_trigger",
-                                        dev_dir_name, "NULL");
-               if (ret < 0)
-                       fprintf(stderr, "Failed to write to %s\n",
-                               dev_dir_name);
-
-error_close_buffer_access:
-       if (close(fp) == -1)
+       if (fp >= 0 && close(fp) == -1)
                perror("Failed to close buffer");
-
-error_free_buffer_access:
        free(buffer_access);
-error_free_data:
        free(data);
-error_free_buf_dir_name:
        free(buf_dir_name);
-error_free_channels:
        for (i = num_channels - 1; i >= 0; i--) {
                free(channels[i].name);
                free(channels[i].generic_name);
        }
        free(channels);
-error_free_triggername:
-       if (datardytrigger)
-               free(trigger_name);
-error_disable_channels:
-       if (autochannels == AUTOCHANNELS_ACTIVE) {
-               ret = enable_disable_all_channels(dev_dir_name, 0);
-               if (ret)
-                       fprintf(stderr, "Failed to disable all channels\n");
-       }
-error_free_dev_dir_name:
+       free(trigger_name);
+       free(device_name);
        free(dev_dir_name);
 
        return ret;
index c94175015a82a04a5c8fc7e9a323f1eeeaf820f2..b3accfdf24b9b2fd93707ba4da185700718fe54d 100644 (file)
@@ -2,6 +2,7 @@
 #define _TOOLS_LINUX_ASM_GENERIC_BITOPS___FFS_H_
 
 #include <asm/types.h>
+#include <asm/bitsperlong.h>
 
 /**
  * __ffs - find first bit in word.
index 494c9c615d1cfd9ecbd0ad0bd2a308c1aab2884a..a60a7ccb6782d869b9d47503a158f86a92b84374 100644 (file)
@@ -1 +1,43 @@
-#include "../../../../include/asm-generic/bitops/__fls.h"
+#ifndef _ASM_GENERIC_BITOPS___FLS_H_
+#define _ASM_GENERIC_BITOPS___FLS_H_
+
+#include <asm/types.h>
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __fls(unsigned long word)
+{
+       int num = BITS_PER_LONG - 1;
+
+#if BITS_PER_LONG == 64
+       if (!(word & (~0ul << 32))) {
+               num -= 32;
+               word <<= 32;
+       }
+#endif
+       if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
+               num -= 16;
+               word <<= 16;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
+               num -= 8;
+               word <<= 8;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
+               num -= 4;
+               word <<= 4;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
+               num -= 2;
+               word <<= 2;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG-1))))
+               num -= 1;
+       return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
index 318bb2b202b0cd09a12bdb464e3cd37305653c8a..6a211f40665c02c8e2083b4966eb9c0d0616e9fb 100644 (file)
@@ -1 +1,25 @@
-#include "../../../../include/asm-generic/bitops/arch_hweight.h"
+#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+
+#include <asm/types.h>
+
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+       return __sw_hweight32(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+       return __sw_hweight16(w);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+       return __sw_hweight8(w);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+       return __sw_hweight64(w);
+}
+#endif /* _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ */
index 4bccd7c3d5d6572052d2206a2c09b6a5e57d3f86..18663f59d72f7c221286da025be45ac08b590165 100644 (file)
@@ -2,6 +2,7 @@
 #define _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_
 
 #include <asm/types.h>
+#include <asm/bitsperlong.h>
 
 static inline void set_bit(int nr, unsigned long *addr)
 {
index 0afd644aff838cfe9e1e07dd57d251f525013b3a..0a7e06623470586b0509ef99410dbc54aa572f9e 100644 (file)
@@ -1 +1,43 @@
-#include "../../../../include/asm-generic/bitops/const_hweight.h"
+#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+
+/*
+ * Compile time versions of __arch_hweightN()
+ */
+#define __const_hweight8(w)            \
+       ((unsigned int)                 \
+        ((!!((w) & (1ULL << 0))) +     \
+         (!!((w) & (1ULL << 1))) +     \
+         (!!((w) & (1ULL << 2))) +     \
+         (!!((w) & (1ULL << 3))) +     \
+         (!!((w) & (1ULL << 4))) +     \
+         (!!((w) & (1ULL << 5))) +     \
+         (!!((w) & (1ULL << 6))) +     \
+         (!!((w) & (1ULL << 7)))))
+
+#define __const_hweight16(w) (__const_hweight8(w)  + __const_hweight8((w)  >> 8 ))
+#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
+#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
+
+/*
+ * Generic interface.
+ */
+#define hweight8(w)  (__builtin_constant_p(w) ? __const_hweight8(w)  : __arch_hweight8(w))
+#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
+#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
+#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))
+
+/*
+ * Interface for known constant arguments
+ */
+#define HWEIGHT8(w)  (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
+#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
+#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
+#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))
+
+/*
+ * Type invariant interface to the compile time constant hweight functions.
+ */
+#define HWEIGHT(w)   HWEIGHT64((u64)w)
+
+#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
index 0e4995fa02480ca9b522c7c491277594118582a0..0576d1f42f43fc34fb5efa8e27969afc5dbdf0b4 100644 (file)
@@ -1 +1,41 @@
-#include "../../../../include/asm-generic/bitops/fls.h"
+#ifndef _ASM_GENERIC_BITOPS_FLS_H_
+#define _ASM_GENERIC_BITOPS_FLS_H_
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static __always_inline int fls(int x)
+{
+       int r = 32;
+
+       if (!x)
+               return 0;
+       if (!(x & 0xffff0000u)) {
+               x <<= 16;
+               r -= 16;
+       }
+       if (!(x & 0xff000000u)) {
+               x <<= 8;
+               r -= 8;
+       }
+       if (!(x & 0xf0000000u)) {
+               x <<= 4;
+               r -= 4;
+       }
+       if (!(x & 0xc0000000u)) {
+               x <<= 2;
+               r -= 2;
+       }
+       if (!(x & 0x80000000u)) {
+               x <<= 1;
+               r -= 1;
+       }
+       return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
index 35bee0071e78b76f9eb75b28e943d7bb02877df5..b097cf8444e3f99811ee395144b05a0a237519f3 100644 (file)
@@ -1 +1,36 @@
-#include "../../../../include/asm-generic/bitops/fls64.h"
+#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
+#define _ASM_GENERIC_BITOPS_FLS64_H_
+
+#include <asm/types.h>
+
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+#if BITS_PER_LONG == 32
+static __always_inline int fls64(__u64 x)
+{
+       __u32 h = x >> 32;
+       if (h)
+               return fls(h) + 32;
+       return fls(x);
+}
+#elif BITS_PER_LONG == 64
+static __always_inline int fls64(__u64 x)
+{
+       if (x == 0)
+               return 0;
+       return __fls(x) + 1;
+}
+#else
+#error BITS_PER_LONG not 32 or 64
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
diff --git a/tools/include/asm-generic/bitsperlong.h b/tools/include/asm-generic/bitsperlong.h
new file mode 100644 (file)
index 0000000..45eca51
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef __ASM_GENERIC_BITS_PER_LONG
+#define __ASM_GENERIC_BITS_PER_LONG
+
+#include <uapi/asm-generic/bitsperlong.h>
+
+#ifdef __SIZEOF_LONG__
+#define BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
+#else
+#define BITS_PER_LONG __WORDSIZE
+#endif
+
+#if BITS_PER_LONG != __BITS_PER_LONG
+#error Inconsistent word size. Check asm/bitsperlong.h
+#endif
+
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif
+
+#endif /* __ASM_GENERIC_BITS_PER_LONG */
similarity index 66%
rename from tools/perf/util/include/asm/alternative-asm.h
rename to tools/include/asm/alternative-asm.h
index 3a3a0f16456ae3369cd73faff1d3af9e4e8d4180..2a4d1bfa29884823729c28b63603857f8ab766e2 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _PERF_ASM_ALTERNATIVE_ASM_H
-#define _PERF_ASM_ALTERNATIVE_ASM_H
+#ifndef _TOOLS_ASM_ALTERNATIVE_ASM_H
+#define _TOOLS_ASM_ALTERNATIVE_ASM_H
 
 /* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
 
index 5ad9ee1dd7f6aed579a5e631e309438a80bb472f..49c929a104eee364fe796f64d895d3ab9cced797 100644 (file)
@@ -9,7 +9,9 @@
 #define __WORDSIZE (__SIZEOF_LONG__ * 8)
 #endif
 
-#define BITS_PER_LONG __WORDSIZE
+#ifndef BITS_PER_LONG
+# define BITS_PER_LONG __WORDSIZE
+#endif
 
 #define BIT_MASK(nr)           (1UL << ((nr) % BITS_PER_LONG))
 #define BIT_WORD(nr)           ((nr) / BITS_PER_LONG)
index fa7208a32d763d0ae35e953ccff82c69c772a5c2..e33fc1df3935e3164280fab29ca0a34908ae5b16 100644 (file)
@@ -9,6 +9,17 @@
 # define __always_inline       inline __attribute__((always_inline))
 #endif
 
+#ifdef __ANDROID__
+/*
+ * FIXME: Big hammer to get rid of tons of:
+ *   "warning: always_inline function might not be inlinable"
+ *
+ * At least on android-ndk-r12/platforms/android-24/arch-arm
+ */
+#undef __always_inline
+#define __always_inline        inline
+#endif
+
 #define __user
 
 #ifndef __attribute_const__
index d026c6573018b5fe5595265cef38d20b2cc4e5ec..ad6fa21d977b59fb938bde3c7e727deb24b28fca 100644 (file)
@@ -1,5 +1,104 @@
-#include "../../../include/linux/hash.h"
+#ifndef _LINUX_HASH_H
+#define _LINUX_HASH_H
+/* Fast hashing routine for ints,  longs and pointers.
+   (C) 2002 Nadia Yvette Chambers, IBM */
 
-#ifndef _TOOLS_LINUX_HASH_H
-#define _TOOLS_LINUX_HASH_H
+#include <asm/types.h>
+#include <linux/compiler.h>
+
+/*
+ * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and
+ * fs/inode.c.  It's not actually prime any more (the previous primes
+ * fs/inode.c.  It's not actually prime any more (the previous primes
+ * were actively bad for hashing), but the name remains.
+ */
+#if BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
+#else
+#error Wordsize not 32 or 64
+#endif
+
+/*
+ * This hash multiplies the input by a large odd number and takes the
+ * high bits.  Since multiplication propagates changes to the most
+ * significant end only, it is essential that the high bits of the
+ * product be used for the hash value.
+ *
+ * Chuck Lever verified the effectiveness of this technique:
+ * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties.  (See Knuth vol 3, section 6.4, exercise 9.)
+ *
+ * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
+ * which is very slightly easier to multiply by and makes no
+ * difference to the hash distribution.
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
+#ifdef CONFIG_HAVE_ARCH_HASH
+/* This header may use the GOLDEN_RATIO_xx constants */
+#include <asm/hash.h>
+#endif
+
+/*
+ * The _generic versions exist only so lib/test_hash.c can compare
+ * the arch-optimized versions with the generic.
+ *
+ * Note that if you change these, any <asm/hash.h> that aren't updated
+ * to match need to have their HAVE_ARCH_* define values updated so the
+ * self-test will not false-positive.
+ */
+#ifndef HAVE_ARCH__HASH_32
+#define __hash_32 __hash_32_generic
+#endif
+static inline u32 __hash_32_generic(u32 val)
+{
+       return val * GOLDEN_RATIO_32;
+}
+
+#ifndef HAVE_ARCH_HASH_32
+#define hash_32 hash_32_generic
 #endif
+static inline u32 hash_32_generic(u32 val, unsigned int bits)
+{
+       /* High bits are more random, so use them. */
+       return __hash_32(val) >> (32 - bits);
+}
+
+#ifndef HAVE_ARCH_HASH_64
+#define hash_64 hash_64_generic
+#endif
+static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
+{
+#if BITS_PER_LONG == 64
+       /* 64x64-bit multiply is efficient on all 64-bit processors */
+       return val * GOLDEN_RATIO_64 >> (64 - bits);
+#else
+       /* Hash 64 bits using only 32x32-bit multiply. */
+       return hash_32((u32)val ^ __hash_32(val >> 32), bits);
+#endif
+}
+
+static inline u32 hash_ptr(const void *ptr, unsigned int bits)
+{
+       return hash_long((unsigned long)ptr, bits);
+}
+
+/* This really should be called fold32_ptr; it does no hashing to speak of. */
+static inline u32 hash32_ptr(const void *ptr)
+{
+       unsigned long val = (unsigned long)ptr;
+
+#if BITS_PER_LONG == 64
+       val ^= (val >> 32);
+#endif
+       return (u32)val;
+}
+
+#endif /* _LINUX_HASH_H */
index 76df53539c2a4671a48da7cb64faa8dffea5dc51..28607db02bd3ed165cd535de415ffd3d4476753f 100644 (file)
@@ -2,8 +2,7 @@
 #define __TOOLS_LINUX_KERNEL_H
 
 #include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
+#include <stddef.h>
 #include <assert.h>
 
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 #define cpu_to_le64(x) (x)
 #define cpu_to_le32(x) (x)
 
-static inline int
-vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
-{
-       int i;
-       ssize_t ssize = size;
-
-       i = vsnprintf(buf, size, fmt, args);
-
-       return (i >= ssize) ? (ssize - 1) : i;
-}
-
-static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
-{
-       va_list args;
-       ssize_t ssize = size;
-       int i;
-
-       va_start(args, fmt);
-       i = vsnprintf(buf, size, fmt, args);
-       va_end(args);
-
-       return (i >= ssize) ? (ssize - 1) : i;
-}
+int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+int scnprintf(char * buf, size_t size, const char * fmt, ...);
 
 /*
  * This looks more complex than it should be. But we need to
index 0c27bdf142334570b0090b8c211f419ecccfbe5b..51334edec506815a70868dd451d5c72944ed0a59 100644 (file)
@@ -1 +1,90 @@
-#include "../../../include/linux/poison.h"
+#ifndef _LINUX_POISON_H
+#define _LINUX_POISON_H
+
+/********** include/linux/list.h **********/
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1  ((void *) 0x100 + POISON_POINTER_DELTA)
+#define LIST_POISON2  ((void *) 0x200 + POISON_POINTER_DELTA)
+
+/********** include/linux/timer.h **********/
+/*
+ * Magic number "tsta" to indicate a static timer initializer
+ * for the object debugging code.
+ */
+#define TIMER_ENTRY_STATIC     ((void *) 0x300 + POISON_POINTER_DELTA)
+
+/********** mm/debug-pagealloc.c **********/
+#ifdef CONFIG_PAGE_POISONING_ZERO
+#define PAGE_POISON 0x00
+#else
+#define PAGE_POISON 0xaa
+#endif
+
+/********** mm/page_alloc.c ************/
+
+#define TAIL_MAPPING   ((void *) 0x400 + POISON_POINTER_DELTA)
+
+/********** mm/slab.c **********/
+/*
+ * Magic nums for obj red zoning.
+ * Placed in the first word before and the first word after an obj.
+ */
+#define        RED_INACTIVE    0x09F911029D74E35BULL   /* when obj is inactive */
+#define        RED_ACTIVE      0xD84156C5635688C0ULL   /* when obj is active */
+
+#define SLUB_RED_INACTIVE      0xbb
+#define SLUB_RED_ACTIVE                0xcc
+
+/* ...and for poisoning */
+#define        POISON_INUSE    0x5a    /* for use-uninitialised poisoning */
+#define POISON_FREE    0x6b    /* for use-after-free poisoning */
+#define        POISON_END      0xa5    /* end-byte of poisoning */
+
+/********** arch/$ARCH/mm/init.c **********/
+#define POISON_FREE_INITMEM    0xcc
+
+/********** arch/ia64/hp/common/sba_iommu.c **********/
+/*
+ * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
+ * value of "SBAIOMMU POISON\0" for spill-over poisoning.
+ */
+
+/********** fs/jbd/journal.c **********/
+#define JBD_POISON_FREE                0x5b
+#define JBD2_POISON_FREE       0x5c
+
+/********** drivers/base/dmapool.c **********/
+#define        POOL_POISON_FREED       0xa7    /* !inuse */
+#define        POOL_POISON_ALLOCATED   0xa9    /* !initted */
+
+/********** drivers/atm/ **********/
+#define ATM_POISON_FREE                0x12
+#define ATM_POISON             0xdeadbeef
+
+/********** kernel/mutexes **********/
+#define MUTEX_DEBUG_INIT       0x11
+#define MUTEX_DEBUG_FREE       0x22
+
+/********** lib/flex_array.c **********/
+#define FLEX_ARRAY_FREE        0x6c    /* for use-after-free poisoning */
+
+/********** security/ **********/
+#define KEY_DESTROY            0xbd
+
+#endif
index e26223f1f2872fb1091d3e9352e491fce665ef5e..b968794773116527208be6eb4b7dfccff00edd2f 100644 (file)
@@ -8,8 +8,10 @@ void *memdup(const void *src, size_t len);
 
 int strtobool(const char *s, bool *res);
 
-#ifndef __UCLIBC__
+#ifdef __GLIBC__
 extern size_t strlcpy(char *dest, const char *src, size_t size);
 #endif
 
+char *str_error_r(int errnum, char *buf, size_t buflen);
+
 #endif /* _LINUX_STRING_H_ */
diff --git a/tools/include/uapi/asm-generic/bitsperlong.h b/tools/include/uapi/asm-generic/bitsperlong.h
new file mode 100644 (file)
index 0000000..23e6c41
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef _UAPI__ASM_GENERIC_BITS_PER_LONG
+#define _UAPI__ASM_GENERIC_BITS_PER_LONG
+
+/*
+ * There seems to be no way of detecting this automatically from user
+ * space, so 64 bit architectures should override this in their
+ * bitsperlong.h. In particular, an architecture that supports
+ * both 32 and 64 bit user space must not rely on CONFIG_64BIT
+ * to decide it, but rather check a compiler provided macro.
+ */
+#ifndef __BITS_PER_LONG
+#define __BITS_PER_LONG 32
+#endif
+
+#endif /* _UAPI__ASM_GENERIC_BITS_PER_LONG */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
new file mode 100644 (file)
index 0000000..406459b
--- /dev/null
@@ -0,0 +1,389 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _UAPI__LINUX_BPF_H__
+#define _UAPI__LINUX_BPF_H__
+
+#include <linux/types.h>
+#include <linux/bpf_common.h>
+
+/* Extended instruction set based on top of classic BPF */
+
+/* instruction classes */
+#define BPF_ALU64      0x07    /* alu mode in double word width */
+
+/* ld/ldx fields */
+#define BPF_DW         0x18    /* double word */
+#define BPF_XADD       0xc0    /* exclusive add */
+
+/* alu/jmp fields */
+#define BPF_MOV                0xb0    /* mov reg to reg */
+#define BPF_ARSH       0xc0    /* sign extending arithmetic shift right */
+
+/* change endianness of a register */
+#define BPF_END                0xd0    /* flags for endianness conversion: */
+#define BPF_TO_LE      0x00    /* convert to little-endian */
+#define BPF_TO_BE      0x08    /* convert to big-endian */
+#define BPF_FROM_LE    BPF_TO_LE
+#define BPF_FROM_BE    BPF_TO_BE
+
+#define BPF_JNE                0x50    /* jump != */
+#define BPF_JSGT       0x60    /* SGT is signed '>', GT in x86 */
+#define BPF_JSGE       0x70    /* SGE is signed '>=', GE in x86 */
+#define BPF_CALL       0x80    /* function call */
+#define BPF_EXIT       0x90    /* function return */
+
+/* Register numbers */
+enum {
+       BPF_REG_0 = 0,
+       BPF_REG_1,
+       BPF_REG_2,
+       BPF_REG_3,
+       BPF_REG_4,
+       BPF_REG_5,
+       BPF_REG_6,
+       BPF_REG_7,
+       BPF_REG_8,
+       BPF_REG_9,
+       BPF_REG_10,
+       __MAX_BPF_REG,
+};
+
+/* BPF has 10 general purpose 64-bit registers and stack frame. */
+#define MAX_BPF_REG    __MAX_BPF_REG
+
+struct bpf_insn {
+       __u8    code;           /* opcode */
+       __u8    dst_reg:4;      /* dest register */
+       __u8    src_reg:4;      /* source register */
+       __s16   off;            /* signed offset */
+       __s32   imm;            /* signed immediate constant */
+};
+
+/* BPF syscall commands, see bpf(2) man-page for details. */
+enum bpf_cmd {
+       BPF_MAP_CREATE,
+       BPF_MAP_LOOKUP_ELEM,
+       BPF_MAP_UPDATE_ELEM,
+       BPF_MAP_DELETE_ELEM,
+       BPF_MAP_GET_NEXT_KEY,
+       BPF_PROG_LOAD,
+       BPF_OBJ_PIN,
+       BPF_OBJ_GET,
+};
+
+enum bpf_map_type {
+       BPF_MAP_TYPE_UNSPEC,
+       BPF_MAP_TYPE_HASH,
+       BPF_MAP_TYPE_ARRAY,
+       BPF_MAP_TYPE_PROG_ARRAY,
+       BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+       BPF_MAP_TYPE_PERCPU_HASH,
+       BPF_MAP_TYPE_PERCPU_ARRAY,
+       BPF_MAP_TYPE_STACK_TRACE,
+};
+
+enum bpf_prog_type {
+       BPF_PROG_TYPE_UNSPEC,
+       BPF_PROG_TYPE_SOCKET_FILTER,
+       BPF_PROG_TYPE_KPROBE,
+       BPF_PROG_TYPE_SCHED_CLS,
+       BPF_PROG_TYPE_SCHED_ACT,
+       BPF_PROG_TYPE_TRACEPOINT,
+};
+
+#define BPF_PSEUDO_MAP_FD      1
+
+/* flags for BPF_MAP_UPDATE_ELEM command */
+#define BPF_ANY                0 /* create new element or update existing */
+#define BPF_NOEXIST    1 /* create new element if it didn't exist */
+#define BPF_EXIST      2 /* update existing element */
+
+#define BPF_F_NO_PREALLOC      (1U << 0)
+
+union bpf_attr {
+       struct { /* anonymous struct used by BPF_MAP_CREATE command */
+               __u32   map_type;       /* one of enum bpf_map_type */
+               __u32   key_size;       /* size of key in bytes */
+               __u32   value_size;     /* size of value in bytes */
+               __u32   max_entries;    /* max number of entries in a map */
+               __u32   map_flags;      /* prealloc or not */
+       };
+
+       struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
+               __u32           map_fd;
+               __aligned_u64   key;
+               union {
+                       __aligned_u64 value;
+                       __aligned_u64 next_key;
+               };
+               __u64           flags;
+       };
+
+       struct { /* anonymous struct used by BPF_PROG_LOAD command */
+               __u32           prog_type;      /* one of enum bpf_prog_type */
+               __u32           insn_cnt;
+               __aligned_u64   insns;
+               __aligned_u64   license;
+               __u32           log_level;      /* verbosity level of verifier */
+               __u32           log_size;       /* size of user buffer */
+               __aligned_u64   log_buf;        /* user supplied buffer */
+               __u32           kern_version;   /* checked when prog_type=kprobe */
+       };
+
+       struct { /* anonymous struct used by BPF_OBJ_* commands */
+               __aligned_u64   pathname;
+               __u32           bpf_fd;
+       };
+} __attribute__((aligned(8)));
+
+/* integer value in 'imm' field of BPF_CALL instruction selects which helper
+ * function eBPF program intends to call
+ */
+enum bpf_func_id {
+       BPF_FUNC_unspec,
+       BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
+       BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
+       BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
+       BPF_FUNC_probe_read,      /* int bpf_probe_read(void *dst, int size, void *src) */
+       BPF_FUNC_ktime_get_ns,    /* u64 bpf_ktime_get_ns(void) */
+       BPF_FUNC_trace_printk,    /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */
+       BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
+       BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */
+
+       /**
+        * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
+        * @skb: pointer to skb
+        * @offset: offset within packet from skb->mac_header
+        * @from: pointer where to copy bytes from
+        * @len: number of bytes to store into packet
+        * @flags: bit 0 - if true, recompute skb->csum
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_skb_store_bytes,
+
+       /**
+        * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
+        * @skb: pointer to skb
+        * @offset: offset within packet where IP checksum is located
+        * @from: old value of header field
+        * @to: new value of header field
+        * @flags: bits 0-3 - size of header field
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_l3_csum_replace,
+
+       /**
+        * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
+        * @skb: pointer to skb
+        * @offset: offset within packet where TCP/UDP checksum is located
+        * @from: old value of header field
+        * @to: new value of header field
+        * @flags: bits 0-3 - size of header field
+        *         bit 4 - is pseudo header
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_l4_csum_replace,
+
+       /**
+        * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
+        * @ctx: context pointer passed to next program
+        * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
+        * @index: index inside array that selects specific program to run
+        * Return: 0 on success
+        */
+       BPF_FUNC_tail_call,
+
+       /**
+        * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
+        * @skb: pointer to skb
+        * @ifindex: ifindex of the net device
+        * @flags: bit 0 - if set, redirect to ingress instead of egress
+        *         other bits - reserved
+        * Return: 0 on success
+        */
+       BPF_FUNC_clone_redirect,
+
+       /**
+        * u64 bpf_get_current_pid_tgid(void)
+        * Return: current->tgid << 32 | current->pid
+        */
+       BPF_FUNC_get_current_pid_tgid,
+
+       /**
+        * u64 bpf_get_current_uid_gid(void)
+        * Return: current_gid << 32 | current_uid
+        */
+       BPF_FUNC_get_current_uid_gid,
+
+       /**
+        * bpf_get_current_comm(char *buf, int size_of_buf)
+        * stores current->comm into buf
+        * Return: 0 on success
+        */
+       BPF_FUNC_get_current_comm,
+
+       /**
+        * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
+        * @skb: pointer to skb
+        * Return: classid if != 0
+        */
+       BPF_FUNC_get_cgroup_classid,
+       BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
+       BPF_FUNC_skb_vlan_pop,  /* bpf_skb_vlan_pop(skb) */
+
+       /**
+        * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
+        * retrieve or populate tunnel metadata
+        * @skb: pointer to skb
+        * @key: pointer to 'struct bpf_tunnel_key'
+        * @size: size of 'struct bpf_tunnel_key'
+        * @flags: room for future extensions
+        * Return: 0 on success
+        */
+       BPF_FUNC_skb_get_tunnel_key,
+       BPF_FUNC_skb_set_tunnel_key,
+       BPF_FUNC_perf_event_read,       /* u64 bpf_perf_event_read(&map, index) */
+       /**
+        * bpf_redirect(ifindex, flags) - redirect to another netdev
+        * @ifindex: ifindex of the net device
+        * @flags: bit 0 - if set, redirect to ingress instead of egress
+        *         other bits - reserved
+        * Return: TC_ACT_REDIRECT
+        */
+       BPF_FUNC_redirect,
+
+       /**
+        * bpf_get_route_realm(skb) - retrieve a dst's tclassid
+        * @skb: pointer to skb
+        * Return: realm if != 0
+        */
+       BPF_FUNC_get_route_realm,
+
+       /**
+        * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample
+        * @ctx: struct pt_regs*
+        * @map: pointer to perf_event_array map
+        * @index: index of event in the map
+        * @data: data on stack to be output as raw data
+        * @size: size of data
+        * Return: 0 on success
+        */
+       BPF_FUNC_perf_event_output,
+       BPF_FUNC_skb_load_bytes,
+
+       /**
+        * bpf_get_stackid(ctx, map, flags) - walk user or kernel stack and return id
+        * @ctx: struct pt_regs*
+        * @map: pointer to stack_trace map
+        * @flags: bits 0-7 - number of stack frames to skip
+        *         bit 8 - collect user stack instead of kernel
+        *         bit 9 - compare stacks by hash only
+        *         bit 10 - if two different stacks hash into the same stackid
+        *                  discard old
+        *         other bits - reserved
+        * Return: >= 0 stackid on success or negative error
+        */
+       BPF_FUNC_get_stackid,
+
+       /**
+        * bpf_csum_diff(from, from_size, to, to_size, seed) - calculate csum diff
+        * @from: raw from buffer
+        * @from_size: length of from buffer
+        * @to: raw to buffer
+        * @to_size: length of to buffer
+        * @seed: optional seed
+        * Return: csum result
+        */
+       BPF_FUNC_csum_diff,
+
+       /**
+        * bpf_skb_[gs]et_tunnel_opt(skb, opt, size)
+        * retrieve or populate tunnel options metadata
+        * @skb: pointer to skb
+        * @opt: pointer to raw tunnel option data
+        * @size: size of @opt
+        * Return: 0 on success for set, option size for get
+        */
+       BPF_FUNC_skb_get_tunnel_opt,
+       BPF_FUNC_skb_set_tunnel_opt,
+       __BPF_FUNC_MAX_ID,
+};
+
+/* All flags used by eBPF helper functions, placed here. */
+
+/* BPF_FUNC_skb_store_bytes flags. */
+#define BPF_F_RECOMPUTE_CSUM           (1ULL << 0)
+#define BPF_F_INVALIDATE_HASH          (1ULL << 1)
+
+/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
+ * First 4 bits are for passing the header field size.
+ */
+#define BPF_F_HDR_FIELD_MASK           0xfULL
+
+/* BPF_FUNC_l4_csum_replace flags. */
+#define BPF_F_PSEUDO_HDR               (1ULL << 4)
+#define BPF_F_MARK_MANGLED_0           (1ULL << 5)
+
+/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
+#define BPF_F_INGRESS                  (1ULL << 0)
+
+/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
+#define BPF_F_TUNINFO_IPV6             (1ULL << 0)
+
+/* BPF_FUNC_get_stackid flags. */
+#define BPF_F_SKIP_FIELD_MASK          0xffULL
+#define BPF_F_USER_STACK               (1ULL << 8)
+#define BPF_F_FAST_STACK_CMP           (1ULL << 9)
+#define BPF_F_REUSE_STACKID            (1ULL << 10)
+
+/* BPF_FUNC_skb_set_tunnel_key flags. */
+#define BPF_F_ZERO_CSUM_TX             (1ULL << 1)
+#define BPF_F_DONT_FRAGMENT            (1ULL << 2)
+
+/* BPF_FUNC_perf_event_output flags. */
+#define BPF_F_INDEX_MASK               0xffffffffULL
+#define BPF_F_CURRENT_CPU              BPF_F_INDEX_MASK
+
+/* user accessible mirror of in-kernel sk_buff.
+ * new fields can only be added to the end of this structure
+ */
+struct __sk_buff {
+       __u32 len;
+       __u32 pkt_type;
+       __u32 mark;
+       __u32 queue_mapping;
+       __u32 protocol;
+       __u32 vlan_present;
+       __u32 vlan_tci;
+       __u32 vlan_proto;
+       __u32 priority;
+       __u32 ingress_ifindex;
+       __u32 ifindex;
+       __u32 tc_index;
+       __u32 cb[5];
+       __u32 hash;
+       __u32 tc_classid;
+       __u32 data;
+       __u32 data_end;
+};
+
+struct bpf_tunnel_key {
+       __u32 tunnel_id;
+       union {
+               __u32 remote_ipv4;
+               __u32 remote_ipv6[4];
+       };
+       __u8 tunnel_tos;
+       __u8 tunnel_ttl;
+       __u16 tunnel_ext;
+       __u32 tunnel_label;
+};
+
+#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/tools/include/uapi/linux/bpf_common.h b/tools/include/uapi/linux/bpf_common.h
new file mode 100644 (file)
index 0000000..a5c220e
--- /dev/null
@@ -0,0 +1,55 @@
+#ifndef _UAPI__LINUX_BPF_COMMON_H__
+#define _UAPI__LINUX_BPF_COMMON_H__
+
+/* Instruction classes */
+#define BPF_CLASS(code) ((code) & 0x07)
+#define                BPF_LD          0x00
+#define                BPF_LDX         0x01
+#define                BPF_ST          0x02
+#define                BPF_STX         0x03
+#define                BPF_ALU         0x04
+#define                BPF_JMP         0x05
+#define                BPF_RET         0x06
+#define                BPF_MISC        0x07
+
+/* ld/ldx fields */
+#define BPF_SIZE(code)  ((code) & 0x18)
+#define                BPF_W           0x00
+#define                BPF_H           0x08
+#define                BPF_B           0x10
+#define BPF_MODE(code)  ((code) & 0xe0)
+#define                BPF_IMM         0x00
+#define                BPF_ABS         0x20
+#define                BPF_IND         0x40
+#define                BPF_MEM         0x60
+#define                BPF_LEN         0x80
+#define                BPF_MSH         0xa0
+
+/* alu/jmp fields */
+#define BPF_OP(code)    ((code) & 0xf0)
+#define                BPF_ADD         0x00
+#define                BPF_SUB         0x10
+#define                BPF_MUL         0x20
+#define                BPF_DIV         0x30
+#define                BPF_OR          0x40
+#define                BPF_AND         0x50
+#define                BPF_LSH         0x60
+#define                BPF_RSH         0x70
+#define                BPF_NEG         0x80
+#define                BPF_MOD         0x90
+#define                BPF_XOR         0xa0
+
+#define                BPF_JA          0x00
+#define                BPF_JEQ         0x10
+#define                BPF_JGT         0x20
+#define                BPF_JGE         0x30
+#define                BPF_JSET        0x40
+#define BPF_SRC(code)   ((code) & 0x08)
+#define                BPF_K           0x00
+#define                BPF_X           0x08
+
+#ifndef BPF_MAXINSNS
+#define BPF_MAXINSNS 4096
+#endif
+
+#endif /* _UAPI__LINUX_BPF_COMMON_H__ */
diff --git a/tools/include/uapi/linux/hw_breakpoint.h b/tools/include/uapi/linux/hw_breakpoint.h
new file mode 100644 (file)
index 0000000..b04000a
--- /dev/null
@@ -0,0 +1,30 @@
+#ifndef _UAPI_LINUX_HW_BREAKPOINT_H
+#define _UAPI_LINUX_HW_BREAKPOINT_H
+
+enum {
+       HW_BREAKPOINT_LEN_1 = 1,
+       HW_BREAKPOINT_LEN_2 = 2,
+       HW_BREAKPOINT_LEN_4 = 4,
+       HW_BREAKPOINT_LEN_8 = 8,
+};
+
+enum {
+       HW_BREAKPOINT_EMPTY     = 0,
+       HW_BREAKPOINT_R         = 1,
+       HW_BREAKPOINT_W         = 2,
+       HW_BREAKPOINT_RW        = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+       HW_BREAKPOINT_X         = 4,
+       HW_BREAKPOINT_INVALID   = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
+};
+
+enum bp_type_idx {
+       TYPE_INST       = 0,
+#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
+       TYPE_DATA       = 0,
+#else
+       TYPE_DATA       = 1,
+#endif
+       TYPE_MAX
+};
+
+#endif /* _UAPI_LINUX_HW_BREAKPOINT_H */
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
new file mode 100644 (file)
index 0000000..c66a485
--- /dev/null
@@ -0,0 +1,983 @@
+/*
+ * Performance events:
+ *
+ *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
+ *
+ * Data type definitions, declarations, prototypes.
+ *
+ *    Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#ifndef _UAPI_LINUX_PERF_EVENT_H
+#define _UAPI_LINUX_PERF_EVENT_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <asm/byteorder.h>
+
+/*
+ * User-space ABI bits:
+ */
+
+/*
+ * attr.type
+ */
+enum perf_type_id {
+       PERF_TYPE_HARDWARE                      = 0,
+       PERF_TYPE_SOFTWARE                      = 1,
+       PERF_TYPE_TRACEPOINT                    = 2,
+       PERF_TYPE_HW_CACHE                      = 3,
+       PERF_TYPE_RAW                           = 4,
+       PERF_TYPE_BREAKPOINT                    = 5,
+
+       PERF_TYPE_MAX,                          /* non-ABI */
+};
+
+/*
+ * Generalized performance event event_id types, used by the
+ * attr.event_id parameter of the sys_perf_event_open()
+ * syscall:
+ */
+enum perf_hw_id {
+       /*
+        * Common hardware events, generalized by the kernel:
+        */
+       PERF_COUNT_HW_CPU_CYCLES                = 0,
+       PERF_COUNT_HW_INSTRUCTIONS              = 1,
+       PERF_COUNT_HW_CACHE_REFERENCES          = 2,
+       PERF_COUNT_HW_CACHE_MISSES              = 3,
+       PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
+       PERF_COUNT_HW_BRANCH_MISSES             = 5,
+       PERF_COUNT_HW_BUS_CYCLES                = 6,
+       PERF_COUNT_HW_STALLED_CYCLES_FRONTEND   = 7,
+       PERF_COUNT_HW_STALLED_CYCLES_BACKEND    = 8,
+       PERF_COUNT_HW_REF_CPU_CYCLES            = 9,
+
+       PERF_COUNT_HW_MAX,                      /* non-ABI */
+};
+
+/*
+ * Generalized hardware cache events:
+ *
+ *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
+ *       { read, write, prefetch } x
+ *       { accesses, misses }
+ */
+enum perf_hw_cache_id {
+       PERF_COUNT_HW_CACHE_L1D                 = 0,
+       PERF_COUNT_HW_CACHE_L1I                 = 1,
+       PERF_COUNT_HW_CACHE_LL                  = 2,
+       PERF_COUNT_HW_CACHE_DTLB                = 3,
+       PERF_COUNT_HW_CACHE_ITLB                = 4,
+       PERF_COUNT_HW_CACHE_BPU                 = 5,
+       PERF_COUNT_HW_CACHE_NODE                = 6,
+
+       PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
+};
+
+enum perf_hw_cache_op_id {
+       PERF_COUNT_HW_CACHE_OP_READ             = 0,
+       PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
+       PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,
+
+       PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
+};
+
+enum perf_hw_cache_op_result_id {
+       PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
+       PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,
+
+       PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
+};
+
+/*
+ * Special "software" events provided by the kernel, even if the hardware
+ * does not support performance events. These events measure various
+ * physical and sw events of the kernel (and allow the profiling of them as
+ * well):
+ */
+enum perf_sw_ids {
+       PERF_COUNT_SW_CPU_CLOCK                 = 0,
+       PERF_COUNT_SW_TASK_CLOCK                = 1,
+       PERF_COUNT_SW_PAGE_FAULTS               = 2,
+       PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
+       PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
+       PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
+       PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
+       PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
+       PERF_COUNT_SW_EMULATION_FAULTS          = 8,
+       PERF_COUNT_SW_DUMMY                     = 9,
+       PERF_COUNT_SW_BPF_OUTPUT                = 10,
+
+       PERF_COUNT_SW_MAX,                      /* non-ABI */
+};
+
+/*
+ * Bits that can be set in attr.sample_type to request information
+ * in the overflow packets.
+ */
+enum perf_event_sample_format {
+       PERF_SAMPLE_IP                          = 1U << 0,
+       PERF_SAMPLE_TID                         = 1U << 1,
+       PERF_SAMPLE_TIME                        = 1U << 2,
+       PERF_SAMPLE_ADDR                        = 1U << 3,
+       PERF_SAMPLE_READ                        = 1U << 4,
+       PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
+       PERF_SAMPLE_ID                          = 1U << 6,
+       PERF_SAMPLE_CPU                         = 1U << 7,
+       PERF_SAMPLE_PERIOD                      = 1U << 8,
+       PERF_SAMPLE_STREAM_ID                   = 1U << 9,
+       PERF_SAMPLE_RAW                         = 1U << 10,
+       PERF_SAMPLE_BRANCH_STACK                = 1U << 11,
+       PERF_SAMPLE_REGS_USER                   = 1U << 12,
+       PERF_SAMPLE_STACK_USER                  = 1U << 13,
+       PERF_SAMPLE_WEIGHT                      = 1U << 14,
+       PERF_SAMPLE_DATA_SRC                    = 1U << 15,
+       PERF_SAMPLE_IDENTIFIER                  = 1U << 16,
+       PERF_SAMPLE_TRANSACTION                 = 1U << 17,
+       PERF_SAMPLE_REGS_INTR                   = 1U << 18,
+
+       PERF_SAMPLE_MAX = 1U << 19,             /* non-ABI */
+};
+
+/*
+ * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
+ *
+ * If the user does not pass priv level information via branch_sample_type,
+ * the kernel uses the event's priv level. Branch and event priv levels do
+ * not have to match. Branch priv level is checked for permissions.
+ *
+ * The branch types can be combined, however BRANCH_ANY covers all types
+ * of branches and therefore it supersedes all the other types.
+ */
+enum perf_branch_sample_type_shift {
+       PERF_SAMPLE_BRANCH_USER_SHIFT           = 0, /* user branches */
+       PERF_SAMPLE_BRANCH_KERNEL_SHIFT         = 1, /* kernel branches */
+       PERF_SAMPLE_BRANCH_HV_SHIFT             = 2, /* hypervisor branches */
+
+       PERF_SAMPLE_BRANCH_ANY_SHIFT            = 3, /* any branch types */
+       PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT       = 4, /* any call branch */
+       PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT     = 5, /* any return branch */
+       PERF_SAMPLE_BRANCH_IND_CALL_SHIFT       = 6, /* indirect calls */
+       PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT       = 7, /* transaction aborts */
+       PERF_SAMPLE_BRANCH_IN_TX_SHIFT          = 8, /* in transaction */
+       PERF_SAMPLE_BRANCH_NO_TX_SHIFT          = 9, /* not in transaction */
+       PERF_SAMPLE_BRANCH_COND_SHIFT           = 10, /* conditional branches */
+
+       PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT     = 11, /* call/ret stack */
+       PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT       = 12, /* indirect jumps */
+       PERF_SAMPLE_BRANCH_CALL_SHIFT           = 13, /* direct call */
+
+       PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT       = 14, /* no flags */
+       PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT      = 15, /* no cycles */
+
+       PERF_SAMPLE_BRANCH_MAX_SHIFT            /* non-ABI */
+};
+
+enum perf_branch_sample_type {
+       PERF_SAMPLE_BRANCH_USER         = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
+       PERF_SAMPLE_BRANCH_KERNEL       = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
+       PERF_SAMPLE_BRANCH_HV           = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
+
+       PERF_SAMPLE_BRANCH_ANY          = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
+       PERF_SAMPLE_BRANCH_ANY_CALL     = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+       PERF_SAMPLE_BRANCH_ANY_RETURN   = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+       PERF_SAMPLE_BRANCH_IND_CALL     = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+       PERF_SAMPLE_BRANCH_ABORT_TX     = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+       PERF_SAMPLE_BRANCH_IN_TX        = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
+       PERF_SAMPLE_BRANCH_NO_TX        = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
+       PERF_SAMPLE_BRANCH_COND         = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
+
+       PERF_SAMPLE_BRANCH_CALL_STACK   = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+       PERF_SAMPLE_BRANCH_IND_JUMP     = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
+       PERF_SAMPLE_BRANCH_CALL         = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
+
+       PERF_SAMPLE_BRANCH_NO_FLAGS     = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
+       PERF_SAMPLE_BRANCH_NO_CYCLES    = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
+
+       PERF_SAMPLE_BRANCH_MAX          = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
+};
+
+#define PERF_SAMPLE_BRANCH_PLM_ALL \
+       (PERF_SAMPLE_BRANCH_USER|\
+        PERF_SAMPLE_BRANCH_KERNEL|\
+        PERF_SAMPLE_BRANCH_HV)
+
+/*
+ * Values to determine ABI of the registers dump.
+ */
+enum perf_sample_regs_abi {
+       PERF_SAMPLE_REGS_ABI_NONE       = 0,
+       PERF_SAMPLE_REGS_ABI_32         = 1,
+       PERF_SAMPLE_REGS_ABI_64         = 2,
+};
+
+/*
+ * Values for the memory transaction event qualifier, mostly for
+ * abort events. Multiple bits can be set.
+ */
+enum {
+       PERF_TXN_ELISION        = (1 << 0), /* From elision */
+       PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
+       PERF_TXN_SYNC           = (1 << 2), /* Instruction is related */
+       PERF_TXN_ASYNC          = (1 << 3), /* Instruction not related */
+       PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
+       PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
+       PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
+       PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */
+
+       PERF_TXN_MAX            = (1 << 8), /* non-ABI */
+
+       /* bits 32..63 are reserved for the abort code */
+
+       PERF_TXN_ABORT_MASK  = (0xffffffffULL << 32),
+       PERF_TXN_ABORT_SHIFT = 32,
+};
+
+/*
+ * The format of the data returned by read() on a perf event fd,
+ * as specified by attr.read_format:
+ *
+ * struct read_format {
+ *     { u64           value;
+ *       { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ *       { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
+ *       { u64         id;           } && PERF_FORMAT_ID
+ *     } && !PERF_FORMAT_GROUP
+ *
+ *     { u64           nr;
+ *       { u64         time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ *       { u64         time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
+ *       { u64         value;
+ *         { u64       id;           } && PERF_FORMAT_ID
+ *       }             cntr[nr];
+ *     } && PERF_FORMAT_GROUP
+ * };
+ */
+enum perf_event_read_format {
+       PERF_FORMAT_TOTAL_TIME_ENABLED          = 1U << 0,
+       PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
+       PERF_FORMAT_ID                          = 1U << 2,
+       PERF_FORMAT_GROUP                       = 1U << 3,
+
+       PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
+};
+
+#define PERF_ATTR_SIZE_VER0    64      /* sizeof first published struct */
+#define PERF_ATTR_SIZE_VER1    72      /* add: config2 */
+#define PERF_ATTR_SIZE_VER2    80      /* add: branch_sample_type */
+#define PERF_ATTR_SIZE_VER3    96      /* add: sample_regs_user */
+                                       /* add: sample_stack_user */
+#define PERF_ATTR_SIZE_VER4    104     /* add: sample_regs_intr */
+#define PERF_ATTR_SIZE_VER5    112     /* add: aux_watermark */
+
+/*
+ * Hardware event_id to monitor via a performance monitoring event:
+ *
+ * @sample_max_stack: Max number of frame pointers in a callchain,
+ *                   should be < /proc/sys/kernel/perf_event_max_stack
+ */
+struct perf_event_attr {
+
+       /*
+        * Major type: hardware/software/tracepoint/etc.
+        */
+       __u32                   type;
+
+       /*
+        * Size of the attr structure, for fwd/bwd compat.
+        */
+       __u32                   size;
+
+       /*
+        * Type specific configuration information.
+        */
+       __u64                   config;
+
+       union {
+               __u64           sample_period;
+               __u64           sample_freq;
+       };
+
+       __u64                   sample_type;
+       __u64                   read_format;
+
+       __u64                   disabled       :  1, /* off by default        */
+                               inherit        :  1, /* children inherit it   */
+                               pinned         :  1, /* must always be on PMU */
+                               exclusive      :  1, /* only group on PMU     */
+                               exclude_user   :  1, /* don't count user      */
+                               exclude_kernel :  1, /* ditto kernel          */
+                               exclude_hv     :  1, /* ditto hypervisor      */
+                               exclude_idle   :  1, /* don't count when idle */
+                               mmap           :  1, /* include mmap data     */
+                               comm           :  1, /* include comm data     */
+                               freq           :  1, /* use freq, not period  */
+                               inherit_stat   :  1, /* per task counts       */
+                               enable_on_exec :  1, /* next exec enables     */
+                               task           :  1, /* trace fork/exit       */
+                               watermark      :  1, /* wakeup_watermark      */
+                               /*
+                                * precise_ip:
+                                *
+                                *  0 - SAMPLE_IP can have arbitrary skid
+                                *  1 - SAMPLE_IP must have constant skid
+                                *  2 - SAMPLE_IP requested to have 0 skid
+                                *  3 - SAMPLE_IP must have 0 skid
+                                *
+                                *  See also PERF_RECORD_MISC_EXACT_IP
+                                */
+                               precise_ip     :  2, /* skid constraint       */
+                               mmap_data      :  1, /* non-exec mmap data    */
+                               sample_id_all  :  1, /* sample_type all events */
+
+                               exclude_host   :  1, /* don't count in host   */
+                               exclude_guest  :  1, /* don't count in guest  */
+
+                               exclude_callchain_kernel : 1, /* exclude kernel callchains */
+                               exclude_callchain_user   : 1, /* exclude user callchains */
+                               mmap2          :  1, /* include mmap with inode data     */
+                               comm_exec      :  1, /* flag comm events that are due to an exec */
+                               use_clockid    :  1, /* use @clockid for time fields */
+                               context_switch :  1, /* context switch data */
+                               write_backward :  1, /* Write ring buffer from end to beginning */
+                               __reserved_1   : 36;
+
+       union {
+               __u32           wakeup_events;    /* wakeup every n events */
+               __u32           wakeup_watermark; /* bytes before wakeup   */
+       };
+
+       __u32                   bp_type;
+       union {
+               __u64           bp_addr;
+               __u64           config1; /* extension of config */
+       };
+       union {
+               __u64           bp_len;
+               __u64           config2; /* extension of config1 */
+       };
+       __u64   branch_sample_type; /* enum perf_branch_sample_type */
+
+       /*
+        * Defines set of user regs to dump on samples.
+        * See asm/perf_regs.h for details.
+        */
+       __u64   sample_regs_user;
+
+       /*
+        * Defines size of the user stack to dump on samples.
+        */
+       __u32   sample_stack_user;
+
+       __s32   clockid;
+       /*
+        * Defines set of regs to dump for each sample
+        * state captured on:
+        *  - precise = 0: PMU interrupt
+        *  - precise > 0: sampled instruction
+        *
+        * See asm/perf_regs.h for details.
+        */
+       __u64   sample_regs_intr;
+
+       /*
+        * Wakeup watermark for AUX area
+        */
+       __u32   aux_watermark;
+       __u16   sample_max_stack;
+       __u16   __reserved_2;   /* align to __u64 */
+};
+
+#define perf_flags(attr)       (*(&(attr)->read_format + 1))
+
+/*
+ * Ioctls that can be done on a perf event fd:
+ */
+#define PERF_EVENT_IOC_ENABLE          _IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE         _IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH         _IO ('$', 2)
+#define PERF_EVENT_IOC_RESET           _IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD          _IOW('$', 4, __u64)
+#define PERF_EVENT_IOC_SET_OUTPUT      _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER      _IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID              _IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF         _IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT    _IOW('$', 9, __u32)
+
+enum perf_event_ioc_flags {
+       PERF_IOC_FLAG_GROUP             = 1U << 0,
+};
+
+/*
+ * Structure of the page that can be mapped via mmap
+ */
+struct perf_event_mmap_page {
+       __u32   version;                /* version number of this structure */
+       __u32   compat_version;         /* lowest version this is compat with */
+
+       /*
+        * Bits needed to read the hw events in user-space.
+        *
+        *   u32 seq, time_mult, time_shift, index, width;
+        *   u64 count, enabled, running;
+        *   u64 cyc, time_offset;
+        *   s64 pmc = 0;
+        *
+        *   do {
+        *     seq = pc->lock;
+        *     barrier()
+        *
+        *     enabled = pc->time_enabled;
+        *     running = pc->time_running;
+        *
+        *     if (pc->cap_usr_time && enabled != running) {
+        *       cyc = rdtsc();
+        *       time_offset = pc->time_offset;
+        *       time_mult   = pc->time_mult;
+        *       time_shift  = pc->time_shift;
+        *     }
+        *
+        *     index = pc->index;
+        *     count = pc->offset;
+        *     if (pc->cap_user_rdpmc && index) {
+        *       width = pc->pmc_width;
+        *       pmc = rdpmc(index - 1);
+        *     }
+        *
+        *     barrier();
+        *   } while (pc->lock != seq);
+        *
+        * NOTE: for obvious reason this only works on self-monitoring
+        *       processes.
+        */
+       __u32   lock;                   /* seqlock for synchronization */
+       __u32   index;                  /* hardware event identifier */
+       __s64   offset;                 /* add to hardware event value */
+       __u64   time_enabled;           /* time event active */
+       __u64   time_running;           /* time event on cpu */
+       union {
+               __u64   capabilities;
+               struct {
+                       __u64   cap_bit0                : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
+                               cap_bit0_is_deprecated  : 1, /* Always 1, signals that bit 0 is zero */
+
+                               cap_user_rdpmc          : 1, /* The RDPMC instruction can be used to read counts */
+                               cap_user_time           : 1, /* The time_* fields are used */
+                               cap_user_time_zero      : 1, /* The time_zero field is used */
+                               cap_____res             : 59;
+               };
+       };
+
+       /*
+        * If cap_user_rdpmc this field provides the bit-width of the value
+        * read using the rdpmc() or equivalent instruction. This can be used
+        * to sign extend the result like:
+        *
+        *   pmc <<= 64 - width;
+        *   pmc >>= 64 - width; // signed shift right
+        *   count += pmc;
+        */
+       __u16   pmc_width;
+
+       /*
+        * If cap_usr_time the below fields can be used to compute the time
+        * delta since time_enabled (in ns) using rdtsc or similar.
+        *
+        *   u64 quot, rem;
+        *   u64 delta;
+        *
+        *   quot = (cyc >> time_shift);
+        *   rem = cyc & (((u64)1 << time_shift) - 1);
+        *   delta = time_offset + quot * time_mult +
+        *              ((rem * time_mult) >> time_shift);
+        *
+        * Where time_offset,time_mult,time_shift and cyc are read in the
+        * seqcount loop described above. This delta can then be added to
+        * enabled and possible running (if index), improving the scaling:
+        *
+        *   enabled += delta;
+        *   if (index)
+        *     running += delta;
+        *
+        *   quot = count / running;
+        *   rem  = count % running;
+        *   count = quot * enabled + (rem * enabled) / running;
+        */
+       __u16   time_shift;
+       __u32   time_mult;
+       __u64   time_offset;
+       /*
+        * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
+        * from sample timestamps.
+        *
+        *   time = timestamp - time_zero;
+        *   quot = time / time_mult;
+        *   rem  = time % time_mult;
+        *   cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
+        *
+        * And vice versa:
+        *
+        *   quot = cyc >> time_shift;
+        *   rem  = cyc & (((u64)1 << time_shift) - 1);
+        *   timestamp = time_zero + quot * time_mult +
+        *               ((rem * time_mult) >> time_shift);
+        */
+       __u64   time_zero;
+       __u32   size;                   /* Header size up to __reserved[] fields. */
+
+               /*
+                * Hole for extension of the self monitor capabilities
+                */
+
+       __u8    __reserved[118*8+4];    /* align to 1k. */
+
+       /*
+        * Control data for the mmap() data buffer.
+        *
+        * User-space reading the @data_head value should issue an smp_rmb(),
+        * after reading this value.
+        *
+        * When the mapping is PROT_WRITE the @data_tail value should be
+        * written by userspace to reflect the last read data, after issueing
+        * an smp_mb() to separate the data read from the ->data_tail store.
+        * In this case the kernel will not over-write unread data.
+        *
+        * See perf_output_put_handle() for the data ordering.
+        *
+        * data_{offset,size} indicate the location and size of the perf record
+        * buffer within the mmapped area.
+        */
+       __u64   data_head;              /* head in the data section */
+       __u64   data_tail;              /* user-space written tail */
+       __u64   data_offset;            /* where the buffer starts */
+       __u64   data_size;              /* data buffer size */
+
+       /*
+        * AUX area is defined by aux_{offset,size} fields that should be set
+        * by the userspace, so that
+        *
+        *   aux_offset >= data_offset + data_size
+        *
+        * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
+        *
+        * Ring buffer pointers aux_{head,tail} have the same semantics as
+        * data_{head,tail} and same ordering rules apply.
+        */
+       __u64   aux_head;
+       __u64   aux_tail;
+       __u64   aux_offset;
+       __u64   aux_size;
+};
+
+#define PERF_RECORD_MISC_CPUMODE_MASK          (7 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN       (0 << 0)
+#define PERF_RECORD_MISC_KERNEL                        (1 << 0)
+#define PERF_RECORD_MISC_USER                  (2 << 0)
+#define PERF_RECORD_MISC_HYPERVISOR            (3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL          (4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER            (5 << 0)
+
+/*
+ * Indicates that /proc/PID/maps parsing are truncated by time out.
+ */
+#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT        (1 << 12)
+/*
+ * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
+ * different events so can reuse the same bit position.
+ * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ */
+#define PERF_RECORD_MISC_MMAP_DATA             (1 << 13)
+#define PERF_RECORD_MISC_COMM_EXEC             (1 << 13)
+#define PERF_RECORD_MISC_SWITCH_OUT            (1 << 13)
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP              (1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED          (1 << 15)
+
+struct perf_event_header {
+       __u32   type;
+       __u16   misc;
+       __u16   size;
+};
+
+enum perf_event_type {
+
+       /*
+        * If perf_event_attr.sample_id_all is set then all event types will
+        * have the sample_type selected fields related to where/when
+        * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
+        * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
+        * just after the perf_event_header and the fields already present for
+        * the existing fields, i.e. at the end of the payload. That way a newer
+        * perf.data file will be supported by older perf tools, with these new
+        * optional fields being ignored.
+        *
+        * struct sample_id {
+        *      { u32                   pid, tid; } && PERF_SAMPLE_TID
+        *      { u64                   time;     } && PERF_SAMPLE_TIME
+        *      { u64                   id;       } && PERF_SAMPLE_ID
+        *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
+        *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
+        *      { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
+        * } && perf_event_attr::sample_id_all
+        *
+        * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.  The
+        * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
+        * relative to header.size.
+        */
+
+       /*
+        * The MMAP events record the PROT_EXEC mappings so that we can
+        * correlate userspace IPs to code. They have the following structure:
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *
+        *      u32                             pid, tid;
+        *      u64                             addr;
+        *      u64                             len;
+        *      u64                             pgoff;
+        *      char                            filename[];
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_MMAP                        = 1,
+
+       /*
+        * struct {
+        *      struct perf_event_header        header;
+        *      u64                             id;
+        *      u64                             lost;
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_LOST                        = 2,
+
+       /*
+        * struct {
+        *      struct perf_event_header        header;
+        *
+        *      u32                             pid, tid;
+        *      char                            comm[];
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_COMM                        = 3,
+
+       /*
+        * struct {
+        *      struct perf_event_header        header;
+        *      u32                             pid, ppid;
+        *      u32                             tid, ptid;
+        *      u64                             time;
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_EXIT                        = 4,
+
+       /*
+        * struct {
+        *      struct perf_event_header        header;
+        *      u64                             time;
+        *      u64                             id;
+        *      u64                             stream_id;
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_THROTTLE                    = 5,
+       PERF_RECORD_UNTHROTTLE                  = 6,
+
+       /*
+        * struct {
+        *      struct perf_event_header        header;
+        *      u32                             pid, ppid;
+        *      u32                             tid, ptid;
+        *      u64                             time;
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_FORK                        = 7,
+
+       /*
+        * struct {
+        *      struct perf_event_header        header;
+        *      u32                             pid, tid;
+        *
+        *      struct read_format              values;
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_READ                        = 8,
+
+       /*
+        * struct {
+        *      struct perf_event_header        header;
+        *
+        *      #
+        *      # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
+        *      # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
+        *      # is fixed relative to header.
+        *      #
+        *
+        *      { u64                   id;       } && PERF_SAMPLE_IDENTIFIER
+        *      { u64                   ip;       } && PERF_SAMPLE_IP
+        *      { u32                   pid, tid; } && PERF_SAMPLE_TID
+        *      { u64                   time;     } && PERF_SAMPLE_TIME
+        *      { u64                   addr;     } && PERF_SAMPLE_ADDR
+        *      { u64                   id;       } && PERF_SAMPLE_ID
+        *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
+        *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
+        *      { u64                   period;   } && PERF_SAMPLE_PERIOD
+        *
+        *      { struct read_format    values;   } && PERF_SAMPLE_READ
+        *
+        *      { u64                   nr,
+        *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
+        *
+        *      #
+        *      # The RAW record below is opaque data wrt the ABI
+        *      #
+        *      # That is, the ABI doesn't make any promises wrt to
+        *      # the stability of its content, it may vary depending
+        *      # on event, hardware, kernel version and phase of
+        *      # the moon.
+        *      #
+        *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
+        *      #
+        *
+        *      { u32                   size;
+        *        char                  data[size];}&& PERF_SAMPLE_RAW
+        *
+        *      { u64                   nr;
+        *        { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+        *
+        *      { u64                   abi; # enum perf_sample_regs_abi
+        *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
+        *
+        *      { u64                   size;
+        *        char                  data[size];
+        *        u64                   dyn_size; } && PERF_SAMPLE_STACK_USER
+        *
+        *      { u64                   weight;   } && PERF_SAMPLE_WEIGHT
+        *      { u64                   data_src; } && PERF_SAMPLE_DATA_SRC
+        *      { u64                   transaction; } && PERF_SAMPLE_TRANSACTION
+        *      { u64                   abi; # enum perf_sample_regs_abi
+        *        u64                   regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
+        * };
+        */
+       PERF_RECORD_SAMPLE                      = 9,
+
+       /*
+        * The MMAP2 records are an augmented version of MMAP, they add
+        * maj, min, ino numbers to be used to uniquely identify each mapping
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *
+        *      u32                             pid, tid;
+        *      u64                             addr;
+        *      u64                             len;
+        *      u64                             pgoff;
+        *      u32                             maj;
+        *      u32                             min;
+        *      u64                             ino;
+        *      u64                             ino_generation;
+        *      u32                             prot, flags;
+        *      char                            filename[];
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_MMAP2                       = 10,
+
+       /*
+        * Records that new data landed in the AUX buffer part.
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *
+        *      u64                             aux_offset;
+        *      u64                             aux_size;
+        *      u64                             flags;
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_AUX                         = 11,
+
+       /*
+        * Indicates that instruction trace has started
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *      u32                             pid;
+        *      u32                             tid;
+        * };
+        */
+       PERF_RECORD_ITRACE_START                = 12,
+
+       /*
+        * Records the dropped/lost sample number.
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *
+        *      u64                             lost;
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_LOST_SAMPLES                = 13,
+
+       /*
+        * Records a context switch in or out (flagged by
+        * PERF_RECORD_MISC_SWITCH_OUT). See also
+        * PERF_RECORD_SWITCH_CPU_WIDE.
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_SWITCH                      = 14,
+
+       /*
+        * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
+        * next_prev_tid that are the next (switching out) or previous
+        * (switching in) pid/tid.
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *      u32                             next_prev_pid;
+        *      u32                             next_prev_tid;
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_SWITCH_CPU_WIDE             = 15,
+
+       PERF_RECORD_MAX,                        /* non-ABI */
+};
+
+#define PERF_MAX_STACK_DEPTH           127
+#define PERF_MAX_CONTEXTS_PER_STACK      8
+
+enum perf_callchain_context {
+       PERF_CONTEXT_HV                 = (__u64)-32,
+       PERF_CONTEXT_KERNEL             = (__u64)-128,
+       PERF_CONTEXT_USER               = (__u64)-512,
+
+       PERF_CONTEXT_GUEST              = (__u64)-2048,
+       PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
+       PERF_CONTEXT_GUEST_USER         = (__u64)-2560,
+
+       PERF_CONTEXT_MAX                = (__u64)-4095,
+};
+
+/**
+ * PERF_RECORD_AUX::flags bits
+ */
+#define PERF_AUX_FLAG_TRUNCATED                0x01    /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE                0x02    /* snapshot from overwrite mode */
+
+#define PERF_FLAG_FD_NO_GROUP          (1UL << 0)
+#define PERF_FLAG_FD_OUTPUT            (1UL << 1)
+#define PERF_FLAG_PID_CGROUP           (1UL << 2) /* pid=cgroup id, per-cpu mode only */
+#define PERF_FLAG_FD_CLOEXEC           (1UL << 3) /* O_CLOEXEC */
+
+union perf_mem_data_src {
+       __u64 val;
+       struct {
+               __u64   mem_op:5,       /* type of opcode */
+                       mem_lvl:14,     /* memory hierarchy level */
+                       mem_snoop:5,    /* snoop mode */
+                       mem_lock:2,     /* lock instr */
+                       mem_dtlb:7,     /* tlb access */
+                       mem_rsvd:31;
+       };
+};
+
+/* type of opcode (load/store/prefetch,code) */
+#define PERF_MEM_OP_NA         0x01 /* not available */
+#define PERF_MEM_OP_LOAD       0x02 /* load instruction */
+#define PERF_MEM_OP_STORE      0x04 /* store instruction */
+#define PERF_MEM_OP_PFETCH     0x08 /* prefetch */
+#define PERF_MEM_OP_EXEC       0x10 /* code (execution) */
+#define PERF_MEM_OP_SHIFT      0
+
+/* memory hierarchy (memory level, hit or miss) */
+#define PERF_MEM_LVL_NA                0x01  /* not available */
+#define PERF_MEM_LVL_HIT       0x02  /* hit level */
+#define PERF_MEM_LVL_MISS      0x04  /* miss level  */
+#define PERF_MEM_LVL_L1                0x08  /* L1 */
+#define PERF_MEM_LVL_LFB       0x10  /* Line Fill Buffer */
+#define PERF_MEM_LVL_L2                0x20  /* L2 */
+#define PERF_MEM_LVL_L3                0x40  /* L3 */
+#define PERF_MEM_LVL_LOC_RAM   0x80  /* Local DRAM */
+#define PERF_MEM_LVL_REM_RAM1  0x100 /* Remote DRAM (1 hop) */
+#define PERF_MEM_LVL_REM_RAM2  0x200 /* Remote DRAM (2 hops) */
+#define PERF_MEM_LVL_REM_CCE1  0x400 /* Remote Cache (1 hop) */
+#define PERF_MEM_LVL_REM_CCE2  0x800 /* Remote Cache (2 hops) */
+#define PERF_MEM_LVL_IO                0x1000 /* I/O memory */
+#define PERF_MEM_LVL_UNC       0x2000 /* Uncached memory */
+#define PERF_MEM_LVL_SHIFT     5
+
+/* snoop mode */
+#define PERF_MEM_SNOOP_NA      0x01 /* not available */
+#define PERF_MEM_SNOOP_NONE    0x02 /* no snoop */
+#define PERF_MEM_SNOOP_HIT     0x04 /* snoop hit */
+#define PERF_MEM_SNOOP_MISS    0x08 /* snoop miss */
+#define PERF_MEM_SNOOP_HITM    0x10 /* snoop hit modified */
+#define PERF_MEM_SNOOP_SHIFT   19
+
+/* locked instruction */
+#define PERF_MEM_LOCK_NA       0x01 /* not available */
+#define PERF_MEM_LOCK_LOCKED   0x02 /* locked transaction */
+#define PERF_MEM_LOCK_SHIFT    24
+
+/* TLB access */
+#define PERF_MEM_TLB_NA                0x01 /* not available */
+#define PERF_MEM_TLB_HIT       0x02 /* hit level */
+#define PERF_MEM_TLB_MISS      0x04 /* miss level */
+#define PERF_MEM_TLB_L1                0x08 /* L1 */
+#define PERF_MEM_TLB_L2                0x10 /* L2 */
+#define PERF_MEM_TLB_WK                0x20 /* Hardware Walker*/
+#define PERF_MEM_TLB_OS                0x40 /* OS fault handler */
+#define PERF_MEM_TLB_SHIFT     26
+
+#define PERF_MEM_S(a, s) \
+       (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
+
+/*
+ * single taken branch record layout:
+ *
+ *      from: source instruction (may not always be a branch insn)
+ *        to: branch target
+ *   mispred: branch target was mispredicted
+ * predicted: branch target was predicted
+ *
+ * support for mispred, predicted is optional. In case it
+ * is not supported mispred = predicted = 0.
+ *
+ *     in_tx: running in a hardware transaction
+ *     abort: aborting a hardware transaction
+ *    cycles: cycles from last branch (or 0 if not supported)
+ */
+struct perf_branch_entry {
+       __u64   from;
+       __u64   to;
+       __u64   mispred:1,  /* target mispredicted */
+               predicted:1,/* target predicted */
+               in_tx:1,    /* in transaction */
+               abort:1,    /* transaction abort */
+               cycles:16,  /* cycle count to last branch */
+               reserved:44;
+};
+
+#endif /* _UAPI_LINUX_PERF_EVENT_H */
index 316f308a63ea691c4dfb258066a7f6e90bb8b199..0a6fda9837f7dd3a60a89fbe3083bf8a3835b6f3 100644 (file)
@@ -10,15 +10,23 @@ endif
 
 CC = $(CROSS_COMPILE)gcc
 AR = $(CROSS_COMPILE)ar
+LD = $(CROSS_COMPILE)ld
 
 MAKEFLAGS += --no-print-directory
 
 LIBFILE = $(OUTPUT)libapi.a
 
 CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+
+# Treat warnings as errors unless directed not to
+ifneq ($(WERROR),0)
+  CFLAGS += -Werror
+endif
+
 CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 CFLAGS += -I$(srctree)/tools/lib/api
+CFLAGS += -I$(srctree)/tools/include
 
 RM = rm -f
 
index 0e636c4339b89887b805413a5bc3509585b9d346..b0a035fc87b310cb745716a5bc2af2a570dced52 100644 (file)
@@ -85,7 +85,8 @@ int fdarray__add(struct fdarray *fda, int fd, short revents)
 }
 
 int fdarray__filter(struct fdarray *fda, short revents,
-                   void (*entry_destructor)(struct fdarray *fda, int fd))
+                   void (*entry_destructor)(struct fdarray *fda, int fd, void *arg),
+                   void *arg)
 {
        int fd, nr = 0;
 
@@ -95,7 +96,7 @@ int fdarray__filter(struct fdarray *fda, short revents,
        for (fd = 0; fd < fda->nr; ++fd) {
                if (fda->entries[fd].revents & revents) {
                        if (entry_destructor)
-                               entry_destructor(fda, fd);
+                               entry_destructor(fda, fd, arg);
 
                        continue;
                }
index 45db01818f45c92bca6588eae4f2058b4a67bc73..71287dddc05fbf7ba58772fc7833a1a6d1505f13 100644 (file)
@@ -22,6 +22,7 @@ struct fdarray {
        struct pollfd *entries;
        union {
                int    idx;
+               void   *ptr;
        } *priv;
 };
 
@@ -34,7 +35,8 @@ void fdarray__delete(struct fdarray *fda);
 int fdarray__add(struct fdarray *fda, int fd, short revents);
 int fdarray__poll(struct fdarray *fda, int timeout);
 int fdarray__filter(struct fdarray *fda, short revents,
-                   void (*entry_destructor)(struct fdarray *fda, int fd));
+                   void (*entry_destructor)(struct fdarray *fda, int fd, void *arg),
+                   void *arg);
 int fdarray__grow(struct fdarray *fda, int extra);
 int fdarray__fprintf(struct fdarray *fda, FILE *fp);
 
index 08556cf2c70d400666de1b4b2db6d7d2d05c0e04..ba7094b945ffc7d312902dc8723acf86be68a6f5 100644 (file)
@@ -283,6 +283,11 @@ int filename__read_int(const char *filename, int *value)
        return err;
 }
 
+/*
+ * Parses @value out of @filename with strtoull.
+ * By using 0 for base, the strtoull detects the
+ * base automatically (see man strtoull).
+ */
 int filename__read_ull(const char *filename, unsigned long long *value)
 {
        char line[64];
@@ -292,7 +297,7 @@ int filename__read_ull(const char *filename, unsigned long long *value)
                return -1;
 
        if (read(fd, line, sizeof(line)) > 0) {
-               *value = strtoull(line, NULL, 10);
+               *value = strtoull(line, NULL, 0);
                if (*value != ULLONG_MAX)
                        err = 0;
        }
index a26bb5ea828307358803bc369bbb3c200d266b3d..251b7c342a87d1d78cbef0cebe2e6aafbb10588d 100644 (file)
@@ -5,6 +5,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <linux/string.h>
 #include <errno.h>
 #include <unistd.h>
 #include "fs.h"
@@ -118,7 +119,7 @@ static int strerror_open(int err, char *buf, size_t size, const char *filename)
        }
                break;
        default:
-               snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
+               snprintf(buf, size, "%s", str_error_r(err, sbuf, sizeof(sbuf)));
                break;
        }
 
index fc1bc75ae56d4fdbc3e8cd906cfc6274137e4aa5..62d89d50fcbd0484e7c38883b9d3d512e627eaef 100644 (file)
@@ -68,7 +68,7 @@ FEATURE_USER = .libbpf
 FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf
 FEATURE_DISPLAY = libelf bpf
 
-INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
+INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
 FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
 
 check_feat := 1
@@ -154,6 +154,12 @@ all: fixdep $(VERSION_FILES) all_cmd
 all_cmd: $(CMD_TARGETS)
 
 $(BPF_IN): force elfdep bpfdep
+       @(test -f ../../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
+       (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
+       echo "Warning: tools/include/uapi/linux/bpf.h differs from kernel" >&2 )) || true
+       @(test -f ../../../include/uapi/linux/bpf_common.h -a -f ../../../include/uapi/linux/bpf_common.h && ( \
+       (diff -B ../../include/uapi/linux/bpf_common.h ../../../include/uapi/linux/bpf_common.h >/dev/null) || \
+       echo "Warning: tools/include/uapi/linux/bpf_common.h differs from kernel" >&2 )) || true
        $(Q)$(MAKE) $(build)=libbpf
 
 $(OUTPUT)libbpf.so: $(BPF_IN)
index 1f91cc941b7c2b14243f6ffe4118a21a2ac39c83..4212ed62235b46bc687929cdc38f1002a452b6c2 100644 (file)
@@ -4,6 +4,19 @@
  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  * Copyright (C) 2015 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
  */
 
 #include <stdlib.h>
index a7646554129299bff7ba189506bd51cea406ec1d..e8ba540874972c4f0c02204a6f28b00782b7673e 100644 (file)
@@ -4,6 +4,19 @@
  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  * Copyright (C) 2015 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
  */
 #ifndef __BPF_BPF_H
 #define __BPF_BPF_H
index 7e543c3102d4118a09717bb6fb4c0f779532ae91..32e6b6bc6f7d918bc4ceb259021be1cedbceec38 100644 (file)
@@ -4,6 +4,19 @@
  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  * Copyright (C) 2015 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
  */
 
 #include <stdlib.h>
@@ -71,12 +84,13 @@ static const char *libbpf_strerror_table[NR_ERRNO] = {
        [ERRCODE_OFFSET(LIBELF)]        = "Something wrong in libelf",
        [ERRCODE_OFFSET(FORMAT)]        = "BPF object format invalid",
        [ERRCODE_OFFSET(KVERSION)]      = "'version' section incorrect or lost",
-       [ERRCODE_OFFSET(ENDIAN)]        = "Endian missmatch",
+       [ERRCODE_OFFSET(ENDIAN)]        = "Endian mismatch",
        [ERRCODE_OFFSET(INTERNAL)]      = "Internal error in libbpf",
        [ERRCODE_OFFSET(RELOC)]         = "Relocation failed",
        [ERRCODE_OFFSET(VERIFY)]        = "Kernel verifier blocks program loading",
        [ERRCODE_OFFSET(PROG2BIG)]      = "Program too big",
        [ERRCODE_OFFSET(KVER)]          = "Incorrect kernel version",
+       [ERRCODE_OFFSET(PROGTYPE)]      = "Kernel doesn't support this program type",
 };
 
 int libbpf_strerror(int err, char *buf, size_t size)
@@ -145,6 +159,7 @@ struct bpf_program {
        char *section_name;
        struct bpf_insn *insns;
        size_t insns_cnt;
+       enum bpf_prog_type type;
 
        struct {
                int insn_idx;
@@ -286,6 +301,7 @@ bpf_program__init(void *data, size_t size, char *name, int idx,
        prog->idx = idx;
        prog->instances.fds = NULL;
        prog->instances.nr = -1;
+       prog->type = BPF_PROG_TYPE_KPROBE;
 
        return 0;
 errout:
@@ -881,8 +897,8 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
 }
 
 static int
-load_program(struct bpf_insn *insns, int insns_cnt,
-            char *license, u32 kern_version, int *pfd)
+load_program(enum bpf_prog_type type, struct bpf_insn *insns,
+            int insns_cnt, char *license, u32 kern_version, int *pfd)
 {
        int ret;
        char *log_buf;
@@ -894,9 +910,8 @@ load_program(struct bpf_insn *insns, int insns_cnt,
        if (!log_buf)
                pr_warning("Alloc log buffer for bpf loader error, continue without log\n");
 
-       ret = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
-                              insns_cnt, license, kern_version,
-                              log_buf, BPF_LOG_BUF_SIZE);
+       ret = bpf_load_program(type, insns, insns_cnt, license,
+                              kern_version, log_buf, BPF_LOG_BUF_SIZE);
 
        if (ret >= 0) {
                *pfd = ret;
@@ -912,15 +927,27 @@ load_program(struct bpf_insn *insns, int insns_cnt,
                pr_warning("-- BEGIN DUMP LOG ---\n");
                pr_warning("\n%s\n", log_buf);
                pr_warning("-- END LOG --\n");
+       } else if (insns_cnt >= BPF_MAXINSNS) {
+               pr_warning("Program too large (%d insns), at most %d insns\n",
+                          insns_cnt, BPF_MAXINSNS);
+               ret = -LIBBPF_ERRNO__PROG2BIG;
        } else {
-               if (insns_cnt >= BPF_MAXINSNS) {
-                       pr_warning("Program too large (%d insns), at most %d insns\n",
-                                  insns_cnt, BPF_MAXINSNS);
-                       ret = -LIBBPF_ERRNO__PROG2BIG;
-               } else if (log_buf) {
-                       pr_warning("log buffer is empty\n");
-                       ret = -LIBBPF_ERRNO__KVER;
+               /* Wrong program type? */
+               if (type != BPF_PROG_TYPE_KPROBE) {
+                       int fd;
+
+                       fd = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
+                                             insns_cnt, license, kern_version,
+                                             NULL, 0);
+                       if (fd >= 0) {
+                               close(fd);
+                               ret = -LIBBPF_ERRNO__PROGTYPE;
+                               goto out;
+                       }
                }
+
+               if (log_buf)
+                       ret = -LIBBPF_ERRNO__KVER;
        }
 
 out:
@@ -955,7 +982,7 @@ bpf_program__load(struct bpf_program *prog,
                        pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
                                   prog->section_name, prog->instances.nr);
                }
-               err = load_program(prog->insns, prog->insns_cnt,
+               err = load_program(prog->type, prog->insns, prog->insns_cnt,
                                   license, kern_version, &fd);
                if (!err)
                        prog->instances.fds[0] = fd;
@@ -984,7 +1011,7 @@ bpf_program__load(struct bpf_program *prog,
                        continue;
                }
 
-               err = load_program(result.new_insn_ptr,
+               err = load_program(prog->type, result.new_insn_ptr,
                                   result.new_insn_cnt,
                                   license, kern_version, &fd);
 
@@ -1186,20 +1213,14 @@ bpf_object__next(struct bpf_object *prev)
        return next;
 }
 
-const char *
-bpf_object__get_name(struct bpf_object *obj)
+const char *bpf_object__name(struct bpf_object *obj)
 {
-       if (!obj)
-               return ERR_PTR(-EINVAL);
-       return obj->path;
+       return obj ? obj->path : ERR_PTR(-EINVAL);
 }
 
-unsigned int
-bpf_object__get_kversion(struct bpf_object *obj)
+unsigned int bpf_object__kversion(struct bpf_object *obj)
 {
-       if (!obj)
-               return 0;
-       return obj->kern_version;
+       return obj ? obj->kern_version : 0;
 }
 
 struct bpf_program *
@@ -1224,9 +1245,8 @@ bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
        return &obj->programs[idx];
 }
 
-int bpf_program__set_private(struct bpf_program *prog,
-                            void *priv,
-                            bpf_program_clear_priv_t clear_priv)
+int bpf_program__set_priv(struct bpf_program *prog, void *priv,
+                         bpf_program_clear_priv_t clear_priv)
 {
        if (prog->priv && prog->clear_priv)
                prog->clear_priv(prog, prog->priv);
@@ -1236,10 +1256,9 @@ int bpf_program__set_private(struct bpf_program *prog,
        return 0;
 }
 
-int bpf_program__get_private(struct bpf_program *prog, void **ppriv)
+void *bpf_program__priv(struct bpf_program *prog)
 {
-       *ppriv = prog->priv;
-       return 0;
+       return prog ? prog->priv : ERR_PTR(-EINVAL);
 }
 
 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
@@ -1311,32 +1330,61 @@ int bpf_program__nth_fd(struct bpf_program *prog, int n)
        return fd;
 }
 
-int bpf_map__get_fd(struct bpf_map *map)
+static void bpf_program__set_type(struct bpf_program *prog,
+                                 enum bpf_prog_type type)
 {
-       if (!map)
-               return -EINVAL;
-
-       return map->fd;
+       prog->type = type;
 }
 
-int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef)
+int bpf_program__set_tracepoint(struct bpf_program *prog)
 {
-       if (!map || !pdef)
+       if (!prog)
                return -EINVAL;
+       bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
+       return 0;
+}
 
-       *pdef = map->def;
+int bpf_program__set_kprobe(struct bpf_program *prog)
+{
+       if (!prog)
+               return -EINVAL;
+       bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
        return 0;
 }
 
-const char *bpf_map__get_name(struct bpf_map *map)
+static bool bpf_program__is_type(struct bpf_program *prog,
+                                enum bpf_prog_type type)
 {
-       if (!map)
-               return NULL;
-       return map->name;
+       return prog ? (prog->type == type) : false;
+}
+
+bool bpf_program__is_tracepoint(struct bpf_program *prog)
+{
+       return bpf_program__is_type(prog, BPF_PROG_TYPE_TRACEPOINT);
+}
+
+bool bpf_program__is_kprobe(struct bpf_program *prog)
+{
+       return bpf_program__is_type(prog, BPF_PROG_TYPE_KPROBE);
+}
+
+int bpf_map__fd(struct bpf_map *map)
+{
+       return map ? map->fd : -EINVAL;
 }
 
-int bpf_map__set_private(struct bpf_map *map, void *priv,
-                        bpf_map_clear_priv_t clear_priv)
+const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
+{
+       return map ? &map->def : ERR_PTR(-EINVAL);
+}
+
+const char *bpf_map__name(struct bpf_map *map)
+{
+       return map ? map->name : NULL;
+}
+
+int bpf_map__set_priv(struct bpf_map *map, void *priv,
+                    bpf_map_clear_priv_t clear_priv)
 {
        if (!map)
                return -EINVAL;
@@ -1351,14 +1399,9 @@ int bpf_map__set_private(struct bpf_map *map, void *priv,
        return 0;
 }
 
-int bpf_map__get_private(struct bpf_map *map, void **ppriv)
+void *bpf_map__priv(struct bpf_map *map)
 {
-       if (!map)
-               return -EINVAL;
-
-       if (ppriv)
-               *ppriv = map->priv;
-       return 0;
+       return map ? map->priv : ERR_PTR(-EINVAL);
 }
 
 struct bpf_map *
@@ -1389,7 +1432,7 @@ bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
 }
 
 struct bpf_map *
-bpf_object__get_map_by_name(struct bpf_object *obj, const char *name)
+bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
 {
        struct bpf_map *pos;
 
index a51594c7b51865140f6e6b42c8d1400b951bea82..dd7a513efb10e6337d962fa50f554ad73831ba67 100644 (file)
@@ -4,6 +4,19 @@
  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  * Copyright (C) 2015 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not,  see <http://www.gnu.org/licenses>
  */
 #ifndef __BPF_LIBBPF_H
 #define __BPF_LIBBPF_H
@@ -19,13 +32,14 @@ enum libbpf_errno {
        LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START,
        LIBBPF_ERRNO__FORMAT,   /* BPF object format invalid */
        LIBBPF_ERRNO__KVERSION, /* Incorrect or no 'version' section */
-       LIBBPF_ERRNO__ENDIAN,   /* Endian missmatch */
+       LIBBPF_ERRNO__ENDIAN,   /* Endian mismatch */
        LIBBPF_ERRNO__INTERNAL, /* Internal error in libbpf */
        LIBBPF_ERRNO__RELOC,    /* Relocation failed */
        LIBBPF_ERRNO__LOAD,     /* Load program failure for unknown reason */
        LIBBPF_ERRNO__VERIFY,   /* Kernel verifier blocks program loading */
        LIBBPF_ERRNO__PROG2BIG, /* Program too big */
        LIBBPF_ERRNO__KVER,     /* Incorrect kernel version */
+       LIBBPF_ERRNO__PROGTYPE, /* Kernel doesn't support this program type */
        __LIBBPF_ERRNO__END,
 };
 
@@ -55,8 +69,8 @@ void bpf_object__close(struct bpf_object *object);
 /* Load/unload object into/from kernel */
 int bpf_object__load(struct bpf_object *obj);
 int bpf_object__unload(struct bpf_object *obj);
-const char *bpf_object__get_name(struct bpf_object *obj);
-unsigned int bpf_object__get_kversion(struct bpf_object *obj);
+const char *bpf_object__name(struct bpf_object *obj);
+unsigned int bpf_object__kversion(struct bpf_object *obj);
 
 struct bpf_object *bpf_object__next(struct bpf_object *prev);
 #define bpf_object__for_each_safe(pos, tmp)                    \
@@ -78,11 +92,10 @@ struct bpf_program *bpf_program__next(struct bpf_program *prog,
 typedef void (*bpf_program_clear_priv_t)(struct bpf_program *,
                                         void *);
 
-int bpf_program__set_private(struct bpf_program *prog, void *priv,
-                            bpf_program_clear_priv_t clear_priv);
+int bpf_program__set_priv(struct bpf_program *prog, void *priv,
+                         bpf_program_clear_priv_t clear_priv);
 
-int bpf_program__get_private(struct bpf_program *prog,
-                            void **ppriv);
+void *bpf_program__priv(struct bpf_program *prog);
 
 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy);
 
@@ -152,6 +165,15 @@ int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
 
 int bpf_program__nth_fd(struct bpf_program *prog, int n);
 
+/*
+ * Adjust type of bpf program. Default is kprobe.
+ */
+int bpf_program__set_tracepoint(struct bpf_program *prog);
+int bpf_program__set_kprobe(struct bpf_program *prog);
+
+bool bpf_program__is_tracepoint(struct bpf_program *prog);
+bool bpf_program__is_kprobe(struct bpf_program *prog);
+
 /*
  * We don't need __attribute__((packed)) now since it is
  * unnecessary for 'bpf_map_def' because they are all aligned.
@@ -171,7 +193,7 @@ struct bpf_map_def {
  */
 struct bpf_map;
 struct bpf_map *
-bpf_object__get_map_by_name(struct bpf_object *obj, const char *name);
+bpf_object__find_map_by_name(struct bpf_object *obj, const char *name);
 
 struct bpf_map *
 bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
@@ -180,13 +202,13 @@ bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
             (pos) != NULL;                             \
             (pos) = bpf_map__next((pos), (obj)))
 
-int bpf_map__get_fd(struct bpf_map *map);
-int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef);
-const char *bpf_map__get_name(struct bpf_map *map);
+int bpf_map__fd(struct bpf_map *map);
+const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
+const char *bpf_map__name(struct bpf_map *map);
 
 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
-int bpf_map__set_private(struct bpf_map *map, void *priv,
-                        bpf_map_clear_priv_t clear_priv);
-int bpf_map__get_private(struct bpf_map *map, void **ppriv);
+int bpf_map__set_priv(struct bpf_map *map, void *priv,
+                     bpf_map_clear_priv_t clear_priv);
+void *bpf_map__priv(struct bpf_map *map);
 
 #endif
diff --git a/tools/lib/str_error_r.c b/tools/lib/str_error_r.c
new file mode 100644 (file)
index 0000000..503ae07
--- /dev/null
@@ -0,0 +1,26 @@
+#undef _GNU_SOURCE
+#include <string.h>
+#include <stdio.h>
+#include <linux/string.h>
+
+/*
+ * The tools so far have been using the strerror_r() GNU variant, that returns
+ * a string, be it the buffer passed or something else.
+ *
+ * But that, besides being tricky in cases where we expect that the function
+ * using strerror_r() returns the error formatted in a provided buffer (we have
+ * to check if it returned something else and copy that instead), breaks the
+ * build on systems not using glibc, like Alpine Linux, where musl libc is
+ * used.
+ *
+ * So, introduce yet another wrapper, str_error_r(), that has the GNU
+ * interface, but uses the portable XSI variant of strerror_r(), so that users
+ * rest assured that the provided buffer is used and it is what is returned.
+ */
+char *str_error_r(int errnum, char *buf, size_t buflen)
+{
+       int err = strerror_r(errnum, buf, buflen);
+       if (err)
+               snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, %p, %zd)=%d", errnum, buf, buflen, err);
+       return buf;
+}
index a8103700c172c6a24eb2db7fd88c56594f9136cf..ce4b7e5275661774d6db64e1eeaa5086c7de732a 100644 (file)
@@ -19,7 +19,13 @@ MAKEFLAGS += --no-print-directory
 LIBFILE = $(OUTPUT)libsubcmd.a
 
 CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+
+# Treat warnings as errors unless directed not to
+ifneq ($(WERROR),0)
+  CFLAGS += -Werror
+endif
+
 CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 
 CFLAGS += -I$(srctree)/tools/include/
index f4f6c9eb8e596bb8ea8dbfa756b3fe2e2ae424e8..911f83942723acd8a1338f90582cc9561082cad6 100644 (file)
@@ -3,6 +3,7 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <string.h>
+#include <linux/string.h>
 #include <errno.h>
 #include <sys/wait.h>
 #include "subcmd-util.h"
@@ -109,7 +110,7 @@ int start_command(struct child_process *cmd)
 
                if (cmd->dir && chdir(cmd->dir))
                        die("exec %s: cd to %s failed (%s)", cmd->argv[0],
-                           cmd->dir, strerror_r(errno, sbuf, sizeof(sbuf)));
+                           cmd->dir, str_error_r(errno, sbuf, sizeof(sbuf)));
                if (cmd->env) {
                        for (; *cmd->env; cmd->env++) {
                                if (strchr(*cmd->env, '='))
@@ -173,7 +174,7 @@ static int wait_or_whine(pid_t pid)
                        if (errno == EINTR)
                                continue;
                        fprintf(stderr, " Error: waitpid failed (%s)",
-                               strerror_r(errno, sbuf, sizeof(sbuf)));
+                               str_error_r(errno, sbuf, sizeof(sbuf)));
                        return -ERR_RUN_COMMAND_WAITPID;
                }
                if (waiting != pid)
index a8b6357d1ffef8678d82b0ae357b445b9ba2cf71..664c90c8e22ba429a9f7e00f588270e16d985ba4 100644 (file)
@@ -23,6 +23,7 @@
  *  Frederic Weisbecker gave his permission to relicense the code to
  *  the Lesser General Public License.
  */
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -31,8 +32,9 @@
 #include <errno.h>
 #include <stdint.h>
 #include <limits.h>
+#include <linux/string.h>
 
-#include <netinet/ip6.h>
+#include <netinet/in.h>
 #include "event-parse.h"
 #include "event-utils.h"
 
@@ -6131,12 +6133,7 @@ int pevent_strerror(struct pevent *pevent __maybe_unused,
        const char *msg;
 
        if (errnum >= 0) {
-               msg = strerror_r(errnum, buf, buflen);
-               if (msg != buf) {
-                       size_t len = strlen(msg);
-                       memcpy(buf, msg, min(buflen - 1, len));
-                       *(buf + min(buflen - 1, len)) = '\0';
-               }
+               str_error_r(errnum, buf, buflen);
                return 0;
        }
 
index 88cccea3ca9910314bf8024545c76620051fa263..7c214ceb93860abea126f086da2e4e823417fc01 100644 (file)
 #include "event-utils.h"
 
 #define COMM "COMM"
+#define CPU "CPU"
 
 static struct format_field comm = {
        .name = "COMM",
 };
 
+static struct format_field cpu = {
+       .name = "CPU",
+};
+
 struct event_list {
        struct event_list       *next;
        struct event_format     *event;
@@ -382,14 +387,17 @@ create_arg_item(struct event_format *event, const char *token,
                /* Consider this a field */
                field = pevent_find_any_field(event, token);
                if (!field) {
-                       if (strcmp(token, COMM) != 0) {
+                       /* If token is 'COMM' or 'CPU' then it is special */
+                       if (strcmp(token, COMM) == 0) {
+                               field = &comm;
+                       } else if (strcmp(token, CPU) == 0) {
+                               field = &cpu;
+                       } else {
                                /* not a field, Make it false */
                                arg->type = FILTER_ARG_BOOLEAN;
                                arg->boolean.value = FILTER_FALSE;
                                break;
                        }
-                       /* If token is 'COMM' then it is special */
-                       field = &comm;
                }
                arg->type = FILTER_ARG_FIELD;
                arg->field.field = field;
@@ -1718,6 +1726,10 @@ get_value(struct event_format *event,
                return (unsigned long)name;
        }
 
+       /* Handle our dummy "cpu" field */
+       if (field == &cpu)
+               return record->cpu;
+
        pevent_read_number_field(field, record->data, &val);
 
        if (!(field->flags & FIELD_IS_SIGNED))
diff --git a/tools/lib/vsprintf.c b/tools/lib/vsprintf.c
new file mode 100644 (file)
index 0000000..45f9a06
--- /dev/null
@@ -0,0 +1,24 @@
+#include <sys/types.h>
+#include <linux/kernel.h>
+#include <stdio.h>
+
+int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+       int i = vsnprintf(buf, size, fmt, args);
+       ssize_t ssize = size;
+
+       return (i >= ssize) ? (ssize - 1) : i;
+}
+
+int scnprintf(char * buf, size_t size, const char * fmt, ...)
+{
+       ssize_t ssize = size;
+       va_list args;
+       int i;
+
+       va_start(args, fmt);
+       i = vsnprintf(buf, size, fmt, args);
+       va_end(args);
+
+       return (i >= ssize) ? (ssize - 1) : i;
+}
index 0e89258a354124b60a225bc966b07ea13e83ce75..2457916a3943710b0571bf29792a6ba97cc262f8 100644 (file)
@@ -5,9 +5,14 @@ objtool-y += special.o
 objtool-y += objtool.o
 
 objtool-y += libstring.o
+objtool-y += str_error_r.o
 
 CFLAGS += -I$(srctree)/tools/lib
 
 $(OUTPUT)libstring.o: ../lib/string.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)str_error_r.o: ../lib/str_error_r.c FORCE
+       $(call rule_mkdir)
+       $(call if_changed_dep,cc_o_c)
index f094f3c4ed84001557503f6cdd8ed33065c86611..1f75b0a046ccad81bd78da1189e5039f7c40fd37 100644 (file)
@@ -26,7 +26,7 @@ OBJTOOL_IN := $(OBJTOOL)-in.o
 
 all: $(OBJTOOL)
 
-INCLUDES := -I$(srctree)/tools/include
+INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi
 CFLAGS   += -Wall -Werror $(EXTRA_WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
 LDFLAGS  += -lelf $(LIBSUBCMD)
 
@@ -41,8 +41,11 @@ include $(srctree)/tools/build/Makefile.include
 $(OBJTOOL_IN): fixdep FORCE
        @$(MAKE) $(build)=objtool
 
+# Busybox's diff doesn't have -I, avoid warning in that case
+#
 $(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
-       @(test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
+       @(diff -I 2>&1 | grep -q 'option requires an argument' && \
+       test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
        diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
        diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
        diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
index e8a1e69eb92c5235735d42847e4f34b2e737a972..17fa7fc34fdf42c3c2d2cdb5bc98d47aac6bcab3 100644 (file)
@@ -26,6 +26,7 @@
  */
 
 #include <string.h>
+#include <stdlib.h>
 #include <subcmd/parse-options.h>
 
 #include "builtin.h"
@@ -122,10 +123,14 @@ static bool ignore_func(struct objtool_file *file, struct symbol *func)
 
        /* check for STACK_FRAME_NON_STANDARD */
        if (file->whitelist && file->whitelist->rela)
-               list_for_each_entry(rela, &file->whitelist->rela->rela_list, list)
-                       if (rela->sym->sec == func->sec &&
+               list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) {
+                       if (rela->sym->type == STT_SECTION &&
+                           rela->sym->sec == func->sec &&
                            rela->addend == func->offset)
                                return true;
+                       if (rela->sym->type == STT_FUNC && rela->sym == func)
+                               return true;
+               }
 
        /* check if it has a context switching instruction */
        func_for_each_insn(file, func, insn)
@@ -663,7 +668,7 @@ static int add_func_switch_tables(struct objtool_file *file,
                                  struct symbol *func)
 {
        struct instruction *insn, *prev_jump;
-       struct rela *text_rela, *rodata_rela, *prev_rela;
+       struct rela *text_rela, *rodata_rela, *prev_rela = NULL;
        int ret;
 
        prev_jump = NULL;
index e11f6b69cce67e05b16c31767fbf36a8f68f0af4..0d7983ac63ef9e300110d9a6ec6771a75378784e 100644 (file)
 #include "elf.h"
 #include "warn.h"
 
+/*
+ * Fallback for systems without this "read, mmaping if possible" cmd.
+ */
+#ifndef ELF_C_READ_MMAP
+#define ELF_C_READ_MMAP ELF_C_READ
+#endif
+
 struct section *find_section_by_name(struct elf *elf, const char *name)
 {
        struct section *sec;
index 3d1bb802dbf4663801cf5ba50e08befc2c4272a2..3db3db9278be6f52af6bb9935eb7698eb1fc2f79 100644 (file)
@@ -30,3 +30,4 @@ config.mak.autogen
 *.pyo
 .config-detected
 util/intel-pt-decoder/inat-tables.c
+arch/*/include/generated/
index 8484c3a04a6ada9e1983963ce5b6b841f31f53c8..24a59998fc91e814ad96f658d3481d88d798b60c 100644 (file)
@@ -12,14 +12,14 @@ Set the NDK variable to point to the path where you installed the NDK:
 
 2. Set cross-compiling environment variables for NDK toolchain and sysroot.
 For arm:
-  export NDK_TOOLCHAIN=${NDK}/toolchains/arm-linux-androideabi-4.6/prebuilt/linux-x86/bin/arm-linux-androideabi-
-  export NDK_SYSROOT=${NDK}/platforms/android-9/arch-arm
+  export NDK_TOOLCHAIN=${NDK}/toolchains/arm-linux-androideabi-4.9/prebuilt/linux-x86_64/bin/arm-linux-androideabi-
+  export NDK_SYSROOT=${NDK}/platforms/android-24/arch-arm
 For x86:
-  export NDK_TOOLCHAIN=${NDK}/toolchains/x86-4.6/prebuilt/linux-x86/bin/i686-linux-android-
-  export NDK_SYSROOT=${NDK}/platforms/android-9/arch-x86
+  export NDK_TOOLCHAIN=${NDK}/toolchains/x86-4.9/prebuilt/linux-x86_64/bin/i686-linux-android-
+  export NDK_SYSROOT=${NDK}/platforms/android-24/arch-x86
 
-This method is not working for Android NDK versions up to Revision 8b.
-perf uses some bionic enhancements that are not included in these NDK versions.
+This method is only tested for Android NDK versions Revision 11b and later.
+perf uses some bionic enhancements that are not included in prior NDK versions.
 You can use method (b) described below instead.
 
 (b). Use the Android source tree
@@ -49,9 +49,9 @@ II. Compile perf for Android
 ------------------------------------------------
 You need to run make with the NDK toolchain and sysroot defined above:
 For arm:
-  make ARCH=arm CROSS_COMPILE=${NDK_TOOLCHAIN} CFLAGS="--sysroot=${NDK_SYSROOT}"
+  make WERROR=0 ARCH=arm CROSS_COMPILE=${NDK_TOOLCHAIN} EXTRA_CFLAGS="-pie --sysroot=${NDK_SYSROOT}"
 For x86:
-  make ARCH=x86 CROSS_COMPILE=${NDK_TOOLCHAIN} CFLAGS="--sysroot=${NDK_SYSROOT}"
+  make WERROR=0 ARCH=x86 CROSS_COMPILE=${NDK_TOOLCHAIN} EXTRA_CFLAGS="-pie --sysroot=${NDK_SYSROOT}"
 
 III. Install perf
 -----------------------------------------------
index 778f54d4d0bd7516c3342d2fdda129138bc28779..8ffbd272952d2e57d21451408579c684a551aa65 100644 (file)
@@ -61,6 +61,13 @@ OPTIONS
 
 --stdio:: Use the stdio interface.
 
+--stdio-color::
+       'always', 'never' or 'auto', allowing configuring color output
+       via the command line, in addition to via "color.ui" .perfconfig.
+       Use '--stdio-color always' to generate color even when redirecting
+       to a pipe or file. Using just '--stdio-color' is equivalent to
+       using 'always'.
+
 --tui:: Use the TUI interface. Use of --tui requires a tty, if one is not
        present, as when piping to other commands, the stdio interface is
        used. This interface starts by centering on the line with more
index dd07b55f58d8e9990e6f625ab3ebce2bf6bd6bbc..058064db39d2d7e5cab506c5cc13911076a69a99 100644 (file)
@@ -15,6 +15,9 @@ DESCRIPTION
 This command manages the build-id cache. It can add, remove, update and purge
 files to/from the cache. In the future it should as well set upper limits for
 the space used by the cache, etc.
+This also scans the target binary for SDT (Statically Defined Tracing) events
+and records them along with the buildid-cache, which will be used by perf-probe.
+For more details, see linkperf:perf-probe[1].
 
 OPTIONS
 -------
index be8fa1a0a97e4294454cbef60440118dec21245d..f0796a47dfa30ee6523381e069340b3eeeeeb3ef 100644 (file)
@@ -34,6 +34,10 @@ OPTIONS for 'convert'
 --verbose::
         Be more verbose (show counter open errors, etc).
 
+--all::
+       Convert all events, including non-sample events (comm, fork, ...), to output.
+       Default is off, only convert samples.
+
 SEE ALSO
 --------
 linkperf:perf[1]
index 1d6092c460dd085401cbe084fed2166d84de105c..73496320fca3f871ac50024aff4a5e1b7def582b 100644 (file)
@@ -56,6 +56,9 @@ OPTIONS
 --all-user::
        Configure all used events to run in user space.
 
+--ldload::
+       Specify desired latency for loads event.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1]
index 3a8a9ba2b0412aba28f1796e283106f84ec63266..736da44596e451fa1a14d9a045f7feda269a0779 100644 (file)
@@ -67,7 +67,10 @@ OPTIONS
 
 -l::
 --list[=[GROUP:]EVENT]::
-       List up current probe events. This can also accept filtering patterns of event names.
+       List up current probe events. This can also accept filtering patterns of
+       event names.
+       When this is used with --cache, perf shows all cached probes instead of
+       the live probes.
 
 -L::
 --line=::
@@ -109,6 +112,12 @@ OPTIONS
        Dry run. With this option, --add and --del doesn't execute actual
        adding and removal operations.
 
+--cache::
+       (With --add) Cache the probes. Any events which successfully added
+       are also stored in the cache file.
+       (With --list) Show cached probes.
+       (With --del) Remove cached probes.
+
 --max-probes=NUM::
        Set the maximum number of probe points for an event. Default is 128.
 
@@ -134,19 +143,30 @@ PROBE SYNTAX
 Probe points are defined by following syntax.
 
     1) Define event based on function name
-     [EVENT=]FUNC[@SRC][:RLN|+OFFS|%return|;PTN] [ARG ...]
+     [[GROUP:]EVENT=]FUNC[@SRC][:RLN|+OFFS|%return|;PTN] [ARG ...]
 
     2) Define event based on source file with line number
-     [EVENT=]SRC:ALN [ARG ...]
+     [[GROUP:]EVENT=]SRC:ALN [ARG ...]
 
     3) Define event based on source file with lazy pattern
-     [EVENT=]SRC;PTN [ARG ...]
+     [[GROUP:]EVENT=]SRC;PTN [ARG ...]
 
+    4) Pre-defined SDT events or cached event with name
+     %[sdt_PROVIDER:]SDTEVENT
+     or,
+     sdt_PROVIDER:SDTEVENT
 
-'EVENT' specifies the name of new event, if omitted, it will be set the name of the probed function. Currently, event group name is set as 'probe'.
+'EVENT' specifies the name of the new event, if omitted, it will be set to the name of the probed function. You can also specify a group name by 'GROUP', if omitted, 'probe' is used for kprobe and 'probe_<bin>' is used for uprobe.
+Note that using an existing group name can conflict with other events. Especially, using the group name reserved for kernel modules can hide embedded events in the
+modules.
 'FUNC' specifies a probed function name, and it may have one of the following options; '+OFFS' is the offset from function entry address in bytes, ':RLN' is the relative-line number from function entry line, and '%return' means that it probes function return. And ';PTN' means lazy matching pattern (see LAZY MATCHING). Note that ';PTN' must be the end of the probe point definition.  In addition, '@SRC' specifies a source file which has that function.
 It is also possible to specify a probe point by the source line number or lazy matching by using 'SRC:ALN' or 'SRC;PTN' syntax, where 'SRC' is the source file path, ':ALN' is the line number and ';PTN' is the lazy matching pattern.
 'ARG' specifies the arguments of this probe point, (see PROBE ARGUMENT).
+'SDTEVENT' and 'PROVIDER' are the pre-defined event name which is defined by user SDT (Statically Defined Tracing) or the pre-cached probes with event name.
+Note that before using the SDT event, the target binary (on which SDT events are defined) must be scanned by linkperf:perf-buildid-cache[1] to make SDT events as cached events.
+
+For details of the SDT, see below.
+https://sourceware.org/gdb/onlinedocs/gdb/Static-Probe-Points.html
 
 PROBE ARGUMENT
 --------------
@@ -226,4 +246,4 @@ Add probes at malloc() function on libc
 
 SEE ALSO
 --------
-linkperf:perf-trace[1], linkperf:perf-record[1]
+linkperf:perf-trace[1], linkperf:perf-record[1], linkperf:perf-buildid-cache[1]
index 8dbee832abd9e04cb6a8b5dead675fa27b0f39bb..69966abf65d1b21dd6741be6fd7eeb3e9e9d1bfc 100644 (file)
@@ -360,6 +360,35 @@ particular perf.data snapshot should be kept or not.
 
 Implies --timestamp-filename, --no-buildid and --no-buildid-cache.
 
+--dry-run::
+Parse options then exit. --dry-run can be used to detect errors in cmdline
+options.
+
+'perf record --dry-run -e' can act as a BPF script compiler if llvm.dump-obj
+in config file is set to true.
+
+--tail-synthesize::
+Instead of collecting non-sample events (for example, fork, comm, mmap) at
+the beginning of record, collect them during finalizing an output file.
+The collected non-sample events reflect the status of the system when
+record is finished.
+
+--overwrite::
+Makes all events use an overwritable ring buffer. An overwritable ring
+buffer works like a flight recorder: when it gets full, the kernel will
+overwrite the oldest records, that thus will never make it to the
+perf.data file.
+
+When '--overwrite' and '--switch-output' are used perf records and drops
+events until it receives a signal, meaning that something unusual was
+detected that warrants taking a snapshot of the most current events,
+those fitting in the ring buffer at that moment.
+
+'overwrite' attribute can also be set or canceled for an event using
+config terms. For example: 'cycles/overwrite/' and 'instructions/no-overwrite/'.
+
+Implies --tail-synthesize.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-list[1]
index 9cbddc290affdb2ad67c8c76b676e8932513d02f..2d1746295abf1206b5feed03ef8ad344aea6ff59 100644 (file)
@@ -265,6 +265,13 @@ OPTIONS
 
 --stdio:: Use the stdio interface.
 
+--stdio-color::
+       'always', 'never' or 'auto', allowing configuring color output
+       via the command line, in addition to via "color.ui" .perfconfig.
+       Use '--stdio-color always' to generate color even when redirecting
+       to a pipe or file. Using just '--stdio-color' is equivalent to
+       using 'always'.
+
 --tui:: Use the TUI interface, that is integrated with annotate and allows
         zooming into DSOs or threads, among other features. Use of --tui
        requires a tty, if one is not present, as when piping to other
index 4fc44c75263fdb803315394e8353d77063f69e6a..1f6c70594f0f79e378163c6430b9028642eee45d 100644 (file)
@@ -119,13 +119,13 @@ OPTIONS
        srcline, period, iregs, brstack, brstacksym, flags.
         Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
-        e.g., -f sw:comm,tid,time,ip,sym  and -f trace:time,cpu,trace
+        e.g., -F sw:comm,tid,time,ip,sym  and -F trace:time,cpu,trace
 
-               perf script -f <fields>
+               perf script -F <fields>
 
        is equivalent to:
 
-               perf script -f trace:<fields> -f sw:<fields> -f hw:<fields>
+               perf script -F trace:<fields> -F sw:<fields> -F hw:<fields>
 
        i.e., the specified fields apply to all event types if the type string
        is not given.
@@ -133,9 +133,9 @@ OPTIONS
        The arguments are processed in the order received. A later usage can
        reset a prior request. e.g.:
 
-               -f trace: -f comm,tid,time,ip,sym
+               -F trace: -F comm,tid,time,ip,sym
 
-       The first -f suppresses trace events (field list is ""), but then the
+       The first -F suppresses trace events (field list is ""), but then the
        second invocation sets the fields to comm,tid,time,ip,sym. In this case a
        warning is given to the user:
 
@@ -143,9 +143,9 @@ OPTIONS
 
        Alternatively, consider the order:
 
-               -f comm,tid,time,ip,sym -f trace:
+               -F comm,tid,time,ip,sym -F trace:
 
-       The first -f sets the fields for all events and the second -f
+       The first -F sets the fields for all events and the second -F
        suppresses trace events. The user is given a warning message about
        the override, and the result of the above is that only S/W and H/W
        events are displayed with the given fields.
@@ -154,14 +154,14 @@ OPTIONS
        event type, a message is displayed to the user that the option is
        ignored for that type. For example:
 
-               $ perf script -f comm,tid,trace
+               $ perf script -F comm,tid,trace
                'trace' not valid for hardware events. Ignoring.
                'trace' not valid for software events. Ignoring.
 
        Alternatively, if the type is given an invalid field is specified it
        is an error. For example:
 
-        perf script -v -f sw:comm,tid,trace
+        perf script -v -F sw:comm,tid,trace
         'trace' not valid for software events.
 
        At this point usage is displayed, and perf-script exits.
@@ -170,10 +170,19 @@ OPTIONS
        Trace decoding. The flags are "bcrosyiABEx" which stand for branch,
        call, return, conditional, system, asynchronous, interrupt,
        transaction abort, trace begin, trace end, and in transaction,
-       respectively.
+       respectively. Known combinations of flags are printed more nicely e.g.
+       "call" for "bc", "return" for "br", "jcc" for "bo", "jmp" for "b",
+       "int" for "bci", "iret" for "bri", "syscall" for "bcs", "sysret" for "brs",
+       "async" for "by", "hw int" for "bcyi", "tx abrt" for "bA", "tr strt" for "bB",
+       "tr end" for "bE". However the "x" flag will be displayed separately in those
+       cases e.g. "jcc     (x)" for a conditional branch within a transaction.
+
+       The callindent field is synthesized and may have a value when
+       Instruction Trace decoding. For calls and returns, it will display the
+       name of the symbol indented with spaces to reflect the stack depth.
 
        Finally, a user may not set fields to none for all event types.
-       i.e., -f "" is not allowed.
+       i.e., -F "" is not allowed.
 
        The brstack output includes branch related information with raw addresses using the
        /v/v/v/v/ syntax in the following order:
index 04f23b404bbc5a7bd4b5bcdfb7e833d79ba1234e..d96ccd4844df9a49f33b05c6c0384b5f8e6eef05 100644 (file)
@@ -204,6 +204,38 @@ Aggregate counts per physical processor for system-wide mode measurements.
 --no-aggr::
 Do not aggregate counts across all monitored CPUs.
 
+--topdown::
+Print top down level 1 metrics if supported by the CPU. This allows
+determining bottlenecks in the CPU pipeline for CPU bound workloads,
+by breaking the cycles consumed down into frontend bound, backend bound,
+bad speculation and retiring.
+
+Frontend bound means that the CPU cannot fetch and decode instructions fast
+enough. Backend bound means that computation or memory access is the
+bottleneck. Bad Speculation means that the CPU wasted cycles due to branch
+mispredictions and similar issues. Retiring means that the CPU computed without
+an apparent bottleneck. The bottleneck is only the real bottleneck
+if the workload is actually bound by the CPU and not by something else.
+
+For best results it is usually a good idea to use it with interval
+mode like -I 1000, as the bottleneck of workloads can change often.
+
+The top down metrics are collected per core instead of per
+CPU thread. Per core mode is automatically enabled
+and -a (global monitoring) is needed, requiring root rights or
+perf.perf_event_paranoid=-1.
+
+Topdown uses the full Performance Monitoring Unit, and needs
+disabling of the NMI watchdog (as root):
+echo 0 > /proc/sys/kernel/nmi_watchdog
+for best results. Otherwise the bottlenecks may be inconsistent
+on workload with changing phases.
+
+This enables --metric-only, unless overridden with --no-metric-only.
+
+To interpret the results it is usually needed to know on which
+CPUs the workload runs on. If needed the CPUs can be forced using
+taskset.
 
 EXAMPLES
 --------
index 31a5c3ea7f74233271626f964dddc9cbd09f6526..b329c65d7f40b5585dc524fcf901657641166f0a 100644 (file)
@@ -30,3 +30,7 @@ OPTIONS
 -v::
 --verbose::
        Be more verbose.
+
+-F::
+--dont-fork::
+       Do not fork child for each test, run all tests within single process.
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
new file mode 100644 (file)
index 0000000..fdc99fe
--- /dev/null
@@ -0,0 +1,442 @@
+perf.data format
+
+Uptodate as of v4.7
+
+This document describes the on-disk perf.data format, generated by perf record
+or perf inject and consumed by the other perf tools.
+
+On a high level perf.data contains the events generated by the PMUs, plus metadata.
+
+All fields are in native-endian of the machine that generated the perf.data.
+
+When perf is writing to a pipe it uses a special version of the file
+format that does not rely on seeking to adjust data offsets.  This
+format is not described here. The pipe version can be converted to
+normal perf.data with perf inject.
+
+The file starts with a perf_header:
+
+struct perf_header {
+       char magic[8];          /* PERFILE2 */
+       uint64_t size;          /* size of the header */
+       uint64_t attr_size;     /* size of an attribute in attrs */
+       struct perf_file_section attrs;
+       struct perf_file_section data;
+       struct perf_file_section event_types;
+       uint64_t flags;
+       uint64_t flags1[3];
+};
+
+The magic number identifies the perf file and the version. Current perf versions
+use PERFILE2. Old perf versions generated a version 1 format (PERFFILE). Version 1
+is not described here. The magic number also identifies the endianness. When
+the 64-bit magic value is byte swapped compared to the native value, the file
+is in non-native endianness.
+
+A perf_file_section contains a pointer to another section of the perf file.
+The header contains three such pointers: for attributes, data and event types.
+
+struct perf_file_section {
+       uint64_t offset;        /* offset from start of file */
+       uint64_t size;          /* size of the section */
+};
+
+Flags section:
+
+The header is followed by different optional headers, described by the bits set
+in flags. Only headers for which the bit is set are included. Each header
+consists of a perf_file_section located after the initial header.
+The respective perf_file_section points to the data of the additional
+header and defines its size.
+
+Some headers consist of strings, which are defined like this:
+
+struct perf_header_string {
+       uint32_t len;
+       char string[len]; /* zero terminated */
+};
+
+Some headers consist of a sequence of strings, which start with a
+
+struct perf_header_string_list {
+     uint32_t nr;
+     struct perf_header_string strings[nr]; /* variable length records */
+};
+
+The bits are the flags bits in a 256 bit bitmap starting with
+flags. These define the valid bits:
+
+       HEADER_RESERVED         = 0,    /* always cleared */
+       HEADER_FIRST_FEATURE    = 1,
+       HEADER_TRACING_DATA     = 1,
+
+Describe me.
+
+       HEADER_BUILD_ID = 2,
+
+The header consists of a sequence of build_id_event. The size of each record
+is defined by header.size (see perf_event.h). Each event defines an ELF build id
+for an executable file name for a pid. An ELF build id is a unique identifier
+assigned by the linker to an executable.
+
+struct build_id_event {
+       struct perf_event_header header;
+       pid_t                    pid;
+       uint8_t                  build_id[24];
+       char                     filename[header.size - offsetof(struct build_id_event, filename)];
+};
+
+       HEADER_HOSTNAME = 3,
+
+A perf_header_string with the hostname where the data was collected
+(uname -n)
+
+       HEADER_OSRELEASE = 4,
+
+A perf_header_string with the os release where the data was collected
+(uname -r)
+
+       HEADER_VERSION = 5,
+
+A perf_header_string with the perf user tool version where the
+data was collected. This is the same as the version of the source tree
+the perf tool was built from.
+
+       HEADER_ARCH = 6,
+
+A perf_header_string with the CPU architecture (uname -m)
+
+       HEADER_NRCPUS = 7,
+
+A structure defining the number of CPUs.
+
+struct nr_cpus {
+       uint32_t nr_cpus_online;
+       uint32_t nr_cpus_available; /* CPUs not yet onlined */
+};
+
+       HEADER_CPUDESC = 8,
+
+A perf_header_string with description of the CPU. On x86 this is the model name
+in /proc/cpuinfo
+
+       HEADER_CPUID = 9,
+
+A perf_header_string with the exact CPU type. On x86 this is
+vendor,family,model,stepping. For example: GenuineIntel,6,69,1
+
+       HEADER_TOTAL_MEM = 10,
+
+An uint64_t with the total memory in bytes.
+
+       HEADER_CMDLINE = 11,
+
+A perf_header_string with the perf command line used to collect the data.
+
+       HEADER_EVENT_DESC = 12,
+
+Another description of the perf_event_attrs, more detailed than header.attrs
+including IDs and names. See perf_event.h or the man page for a description
+of a struct perf_event_attr.
+
+struct {
+       uint32_t nr; /* number of events */
+       uint32_t attr_size; /* size of each perf_event_attr */
+       struct {
+             struct perf_event_attr attr;  /* size of attr_size */
+             uint32_t nr_ids;
+             struct perf_header_string event_string;
+             uint64_t ids[nr_ids];
+       } events[nr]; /* Variable length records */
+};
+
+       HEADER_CPU_TOPOLOGY = 13,
+
+String lists defining the core and CPU threads topology.
+
+struct {
+       struct perf_header_string_list cores; /* Variable length */
+       struct perf_header_string_list threads; /* Variable length */
+};
+
+Example:
+       sibling cores   : 0-3
+       sibling threads : 0-1
+       sibling threads : 2-3
+
+       HEADER_NUMA_TOPOLOGY = 14,
+
+       A list of NUMA node descriptions
+
+struct {
+       uint32_t nr;
+       struct {
+             uint32_t nodenr;
+             uint64_t mem_total;
+             uint64_t mem_free;
+             struct perf_header_string cpus;
+       } nodes[nr]; /* Variable length records */
+};
+
+       HEADER_BRANCH_STACK = 15,
+
+Not implemented in perf.
+
+       HEADER_PMU_MAPPINGS = 16,
+
+       A list of PMU structures, defining the different PMUs supported by perf.
+
+struct {
+       uint32_t nr;
+       struct pmu {
+             uint32_t pmu_type;
+             struct perf_header_string pmu_name;
+       } [nr]; /* Variable length records */
+};
+
+       HEADER_GROUP_DESC = 17,
+
+       Description of counter groups ({...} in perf syntax)
+
+struct {
+         uint32_t nr;
+         struct {
+               struct perf_header_string string;
+               uint32_t leader_idx;
+               uint32_t nr_members;
+        } [nr]; /* Variable length records */
+};
+
+       HEADER_AUXTRACE = 18,
+
+Define additional auxtrace areas in the perf.data. auxtrace is used to store
+undecoded hardware tracing information, such as Intel Processor Trace data.
+
+/**
+ * struct auxtrace_index_entry - indexes a AUX area tracing event within a
+ *                               perf.data file.
+ * @file_offset: offset within the perf.data file
+ * @sz: size of the event
+ */
+struct auxtrace_index_entry {
+       u64                     file_offset;
+       u64                     sz;
+};
+
+#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256
+
+/**
+ * struct auxtrace_index - index of AUX area tracing events within a perf.data
+ *                         file.
+ * @list: linking a number of arrays of entries
+ * @nr: number of entries
+ * @entries: array of entries
+ */
+struct auxtrace_index {
+       struct list_head        list;
+       size_t                  nr;
+       struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
+};
+
+       other bits are reserved and should be ignored for now
+       HEADER_FEAT_BITS        = 256,
+
+Attributes
+
+This is an array of perf_event_attrs, each attr_size bytes long, which defines
+each event collected. See perf_event.h or the man page for a detailed
+description.
+
+Data
+
+This section is the bulk of the file. It consists of a stream of perf_events
+describing events. This matches the format generated by the kernel.
+See perf_event.h or the manpage for a detailed description.
+
+Some notes on parsing:
+
+Ordering
+
+The events are not necessarily in time stamp order, as they can be
+collected in parallel on different CPUs. If the events should be
+processed in time order they need to be sorted first. It is possible
+to only do a partial sort using the FINISHED_ROUND event header (see
+below). perf record guarantees that there is no reordering over a
+FINISHED_ROUND.
+
+ID vs IDENTIFIER
+
+When the event stream contains multiple events each event is identified
+by an ID. This can be either through the PERF_SAMPLE_ID or the
+PERF_SAMPLE_IDENTIFIER header. The PERF_SAMPLE_IDENTIFIER header is
+at a fixed offset from the event header, which allows reliable
+parsing of the header. Relying on ID may be ambiguous.
+IDENTIFIER is only supported by newer Linux kernels.
+
+Perf record specific events:
+
+In addition to the kernel generated event types perf record adds its
+own event types (in addition it also synthesizes some kernel events,
+for example MMAP events)
+
+       PERF_RECORD_USER_TYPE_START             = 64,
+       PERF_RECORD_HEADER_ATTR                 = 64,
+
+struct attr_event {
+       struct perf_event_header header;
+       struct perf_event_attr attr;
+       uint64_t id[];
+};
+
+       PERF_RECORD_HEADER_EVENT_TYPE           = 65, /* deprecated */
+
+#define MAX_EVENT_NAME 64
+
+struct perf_trace_event_type {
+       uint64_t        event_id;
+       char    name[MAX_EVENT_NAME];
+};
+
+struct event_type_event {
+       struct perf_event_header header;
+       struct perf_trace_event_type event_type;
+};
+
+
+       PERF_RECORD_HEADER_TRACING_DATA         = 66,
+
+Describe me
+
+struct tracing_data_event {
+       struct perf_event_header header;
+       uint32_t size;
+};
+
+       PERF_RECORD_HEADER_BUILD_ID             = 67,
+
+Define a ELF build ID for a referenced executable.
+
+       struct build_id_event;   /* See above */
+
+       PERF_RECORD_FINISHED_ROUND              = 68,
+
+No event reordering over this header. No payload.
+
+       PERF_RECORD_ID_INDEX                    = 69,
+
+Map event ids to CPUs and TIDs.
+
+struct id_index_entry {
+       uint64_t id;
+       uint64_t idx;
+       uint64_t cpu;
+       uint64_t tid;
+};
+
+struct id_index_event {
+       struct perf_event_header header;
+       uint64_t nr;
+       struct id_index_entry entries[nr];
+};
+
+       PERF_RECORD_AUXTRACE_INFO               = 70,
+
+Auxtrace type specific information. Describe me
+
+struct auxtrace_info_event {
+       struct perf_event_header header;
+       uint32_t type;
+       uint32_t reserved__; /* For alignment */
+       uint64_t priv[];
+};
+
+       PERF_RECORD_AUXTRACE                    = 71,
+
+Defines auxtrace data. Followed by the actual data. The contents of
+the auxtrace data is dependent on the event and the CPU. For example
+for Intel Processor Trace it contains Processor Trace data generated
+by the CPU.
+
+struct auxtrace_event {
+       struct perf_event_header header;
+       uint64_t size;
+       uint64_t offset;
+       uint64_t reference;
+       uint32_t idx;
+       uint32_t tid;
+       uint32_t cpu;
+       uint32_t reserved__; /* For alignment */
+};
+
+struct aux_event {
+       struct perf_event_header header;
+       uint64_t        aux_offset;
+       uint64_t        aux_size;
+       uint64_t        flags;
+};
+
+       PERF_RECORD_AUXTRACE_ERROR              = 72,
+
+Describes an error in hardware tracing
+
+enum auxtrace_error_type {
+       PERF_AUXTRACE_ERROR_ITRACE  = 1,
+       PERF_AUXTRACE_ERROR_MAX
+};
+
+#define MAX_AUXTRACE_ERROR_MSG 64
+
+struct auxtrace_error_event {
+       struct perf_event_header header;
+       uint32_t type;
+       uint32_t code;
+       uint32_t cpu;
+       uint32_t pid;
+       uint32_t tid;
+       uint32_t reserved__; /* For alignment */
+       uint64_t ip;
+       char msg[MAX_AUXTRACE_ERROR_MSG];
+};
+
+Event types
+
+Define the event attributes with their IDs.
+
+An array bound by the perf_file_section size.
+
+       struct {
+               struct perf_event_attr attr;   /* Size defined by header.attr_size */
+               struct perf_file_section ids;
+       }
+
+ids points to an array of uint64_t defining the ids for event attr attr.
+
+References:
+
+include/uapi/linux/perf_event.h
+
+This is the canonical description of the kernel generated perf_events
+and the perf_event_attrs.
+
+perf_events manpage
+
+A manpage describing perf_event and perf_event_attr is here:
+http://web.eece.maine.edu/~vweaver/projects/perf_events/programming.html
+This tends to be slightly behind the kernel include, but has better
+descriptions.  An (typically older) version of the man page may be
+included with the standard Linux man pages, available with "man
+perf_events"
+
+pmu-tools
+
+https://github.com/andikleen/pmu-tools/tree/master/parser
+
+A definition of the perf.data format in python "construct" format is available
+in the pmu-tools parser. This allows reading perf.data from python and dumping it.
+
+quipper
+
+The quipper C++ parser is available at
+https://chromium.googlesource.com/chromiumos/platform/chromiumos-wide-profiling/
+Unfortunately this parser tends to be many versions behind and may not be able
+to parse data files generated by recent perf.
index 8c8c6b9ce915341dfc01a02185a53b4198b7d790..ad2534df4ba6ce988429510b5c35360c253299cf 100644 (file)
@@ -12,13 +12,23 @@ tools/arch/sparc/include/asm/barrier_32.h
 tools/arch/sparc/include/asm/barrier_64.h
 tools/arch/tile/include/asm/barrier.h
 tools/arch/x86/include/asm/barrier.h
+tools/arch/x86/include/asm/cpufeatures.h
+tools/arch/x86/include/asm/disabled-features.h
+tools/arch/x86/include/asm/required-features.h
+tools/arch/x86/include/uapi/asm/svm.h
+tools/arch/x86/include/uapi/asm/vmx.h
+tools/arch/x86/include/uapi/asm/kvm.h
+tools/arch/x86/include/uapi/asm/kvm_perf.h
+tools/arch/x86/lib/memcpy_64.S
+tools/arch/x86/lib/memset_64.S
+tools/arch/s390/include/uapi/asm/kvm_perf.h
+tools/arch/s390/include/uapi/asm/sie.h
 tools/arch/xtensa/include/asm/barrier.h
 tools/scripts
 tools/build
 tools/arch/x86/include/asm/atomic.h
 tools/arch/x86/include/asm/rmwcc.h
 tools/lib/traceevent
-tools/lib/bpf
 tools/lib/api
 tools/lib/bpf
 tools/lib/subcmd
@@ -29,6 +39,9 @@ tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/lib/find_bit.c
 tools/lib/bitmap.c
+tools/lib/str_error_r.c
+tools/lib/vsprintf.c
+tools/include/asm/alternative-asm.h
 tools/include/asm/atomic.h
 tools/include/asm/barrier.h
 tools/include/asm/bug.h
@@ -52,43 +65,16 @@ tools/include/linux/hash.h
 tools/include/linux/kernel.h
 tools/include/linux/list.h
 tools/include/linux/log2.h
+tools/include/uapi/linux/bpf.h
+tools/include/uapi/linux/bpf_common.h
+tools/include/uapi/linux/hw_breakpoint.h
+tools/include/uapi/linux/perf_event.h
 tools/include/linux/poison.h
 tools/include/linux/rbtree.h
 tools/include/linux/rbtree_augmented.h
 tools/include/linux/string.h
+tools/include/linux/stringify.h
 tools/include/linux/types.h
 tools/include/linux/err.h
 tools/include/linux/bitmap.h
-include/asm-generic/bitops/arch_hweight.h
-include/asm-generic/bitops/const_hweight.h
-include/asm-generic/bitops/fls64.h
-include/asm-generic/bitops/__fls.h
-include/asm-generic/bitops/fls.h
-include/linux/perf_event.h
-include/linux/list.h
-include/linux/hash.h
-include/linux/stringify.h
-include/linux/swab.h
-arch/*/include/asm/unistd*.h
-arch/*/include/uapi/asm/unistd*.h
-arch/*/include/uapi/asm/perf_regs.h
-arch/*/lib/memcpy*.S
-arch/*/lib/memset*.S
-arch/*/include/asm/*features.h
-include/linux/poison.h
-include/linux/hw_breakpoint.h
-include/uapi/linux/perf_event.h
-include/uapi/linux/bpf.h
-include/uapi/linux/bpf_common.h
-include/uapi/linux/const.h
-include/uapi/linux/swab.h
-include/uapi/linux/hw_breakpoint.h
-arch/x86/include/asm/svm.h
-arch/x86/include/asm/vmx.h
-arch/x86/include/asm/kvm_host.h
-arch/x86/include/uapi/asm/svm.h
-arch/x86/include/uapi/asm/vmx.h
-arch/x86/include/uapi/asm/kvm.h
-arch/x86/include/uapi/asm/kvm_perf.h
-arch/s390/include/uapi/asm/sie.h
-arch/s390/include/uapi/asm/kvm_perf.h
+tools/arch/*/include/uapi/asm/perf_regs.h
index bde8cbae7dd98b732060114e22426494ee0df067..6641abb97f0ab8e81e805c3d9e95589369905c99 100644 (file)
@@ -81,6 +81,9 @@ include ../scripts/utilities.mak
 #
 # Define NO_LIBBPF if you do not want BPF support
 #
+# Define NO_SDT if you do not want to define SDT event in perf tools,
+# note that it doesn't disable SDT scanning support.
+#
 # Define FEATURES_DUMP to provide features detection dump file
 # and bypass the feature detection
 
@@ -254,7 +257,8 @@ PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
 PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBAPI)
 
 $(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
-       $(QUIET_GEN)CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
+       $(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
+        CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
          $(PYTHON_WORD) util/setup.py \
          --quiet build_ext; \
        mkdir -p $(OUTPUT)python && \
@@ -344,6 +348,87 @@ export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK
 include $(srctree)/tools/build/Makefile.include
 
 $(PERF_IN): prepare FORCE
+       @(test -f ../../include/uapi/linux/perf_event.h && ( \
+        (diff -B ../include/uapi/linux/perf_event.h ../../include/uapi/linux/perf_event.h >/dev/null) \
+        || echo "Warning: tools/include/uapi/linux/perf_event.h differs from kernel" >&2 )) || true
+       @(test -f ../../include/linux/hash.h && ( \
+        (diff -B ../include/linux/hash.h ../../include/linux/hash.h >/dev/null) \
+        || echo "Warning: tools/include/linux/hash.h differs from kernel" >&2 )) || true
+       @(test -f ../../include/uapi/linux/hw_breakpoint.h && ( \
+        (diff -B ../include/uapi/linux/hw_breakpoint.h ../../include/uapi/linux/hw_breakpoint.h >/dev/null) \
+        || echo "Warning: tools/include/uapi/linux/hw_breakpoint.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/x86/include/asm/disabled-features.h && ( \
+        (diff -B ../arch/x86/include/asm/disabled-features.h ../../arch/x86/include/asm/disabled-features.h >/dev/null) \
+        || echo "Warning: tools/arch/x86/include/asm/disabled-features.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/x86/include/asm/required-features.h && ( \
+        (diff -B ../arch/x86/include/asm/required-features.h ../../arch/x86/include/asm/required-features.h >/dev/null) \
+        || echo "Warning: tools/arch/x86/include/asm/required-features.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/x86/include/asm/cpufeatures.h && ( \
+        (diff -B ../arch/x86/include/asm/cpufeatures.h ../../arch/x86/include/asm/cpufeatures.h >/dev/null) \
+        || echo "Warning: tools/arch/x86/include/asm/cpufeatures.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/x86/lib/memcpy_64.S && ( \
+        (diff -B ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S >/dev/null) \
+        || echo "Warning: tools/arch/x86/lib/memcpy_64.S differs from kernel" >&2 )) || true
+       @(test -f ../../arch/x86/lib/memset_64.S && ( \
+        (diff -B ../arch/x86/lib/memset_64.S ../../arch/x86/lib/memset_64.S >/dev/null) \
+        || echo "Warning: tools/arch/x86/lib/memset_64.S differs from kernel" >&2 )) || true
+       @(test -f ../../arch/arm/include/uapi/asm/perf_regs.h && ( \
+        (diff -B ../arch/arm/include/uapi/asm/perf_regs.h ../../arch/arm/include/uapi/asm/perf_regs.h >/dev/null) \
+        || echo "Warning: tools/arch/arm/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/arm64/include/uapi/asm/perf_regs.h && ( \
+        (diff -B ../arch/arm64/include/uapi/asm/perf_regs.h ../../arch/arm64/include/uapi/asm/perf_regs.h >/dev/null) \
+        || echo "Warning: tools/arch/arm64/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/powerpc/include/uapi/asm/perf_regs.h && ( \
+        (diff -B ../arch/powerpc/include/uapi/asm/perf_regs.h ../../arch/powerpc/include/uapi/asm/perf_regs.h >/dev/null) \
+        || echo "Warning: tools/arch/powerpc/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/x86/include/uapi/asm/perf_regs.h && ( \
+        (diff -B ../arch/x86/include/uapi/asm/perf_regs.h ../../arch/x86/include/uapi/asm/perf_regs.h >/dev/null) \
+        || echo "Warning: tools/arch/x86/include/uapi/asm/perf_regs.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/x86/include/uapi/asm/kvm.h && ( \
+        (diff -B ../arch/x86/include/uapi/asm/kvm.h ../../arch/x86/include/uapi/asm/kvm.h >/dev/null) \
+        || echo "Warning: tools/arch/x86/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/x86/include/uapi/asm/kvm_perf.h && ( \
+        (diff -B ../arch/x86/include/uapi/asm/kvm_perf.h ../../arch/x86/include/uapi/asm/kvm_perf.h >/dev/null) \
+        || echo "Warning: tools/arch/x86/include/uapi/asm/kvm_perf.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/x86/include/uapi/asm/svm.h && ( \
+        (diff -B ../arch/x86/include/uapi/asm/svm.h ../../arch/x86/include/uapi/asm/svm.h >/dev/null) \
+        || echo "Warning: tools/arch/x86/include/uapi/asm/svm.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/x86/include/uapi/asm/vmx.h && ( \
+        (diff -B ../arch/x86/include/uapi/asm/vmx.h ../../arch/x86/include/uapi/asm/vmx.h >/dev/null) \
+        || echo "Warning: tools/arch/x86/include/uapi/asm/vmx.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/powerpc/include/uapi/asm/kvm.h && ( \
+        (diff -B ../arch/powerpc/include/uapi/asm/kvm.h ../../arch/powerpc/include/uapi/asm/kvm.h >/dev/null) \
+        || echo "Warning: tools/arch/powerpc/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/s390/include/uapi/asm/kvm.h && ( \
+        (diff -B ../arch/s390/include/uapi/asm/kvm.h ../../arch/s390/include/uapi/asm/kvm.h >/dev/null) \
+        || echo "Warning: tools/arch/s390/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/s390/include/uapi/asm/kvm_perf.h && ( \
+        (diff -B ../arch/s390/include/uapi/asm/kvm_perf.h ../../arch/s390/include/uapi/asm/kvm_perf.h >/dev/null) \
+        || echo "Warning: tools/arch/s390/include/uapi/asm/kvm_perf.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/s390/include/uapi/asm/sie.h && ( \
+        (diff -B ../arch/s390/include/uapi/asm/sie.h ../../arch/s390/include/uapi/asm/sie.h >/dev/null) \
+        || echo "Warning: tools/arch/s390/include/uapi/asm/sie.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/arm/include/uapi/asm/kvm.h && ( \
+        (diff -B ../arch/arm/include/uapi/asm/kvm.h ../../arch/arm/include/uapi/asm/kvm.h >/dev/null) \
+        || echo "Warning: tools/arch/arm/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
+       @(test -f ../../arch/arm64/include/uapi/asm/kvm.h && ( \
+        (diff -B ../arch/arm64/include/uapi/asm/kvm.h ../../arch/arm64/include/uapi/asm/kvm.h >/dev/null) \
+        || echo "Warning: tools/arch/arm64/include/uapi/asm/kvm.h differs from kernel" >&2 )) || true
+       @(test -f ../../include/asm-generic/bitops/arch_hweight.h && ( \
+        (diff -B ../include/asm-generic/bitops/arch_hweight.h ../../include/asm-generic/bitops/arch_hweight.h >/dev/null) \
+        || echo "Warning: tools/include/asm-generic/bitops/arch_hweight.h differs from kernel" >&2 )) || true
+       @(test -f ../../include/asm-generic/bitops/const_hweight.h && ( \
+        (diff -B ../include/asm-generic/bitops/const_hweight.h ../../include/asm-generic/bitops/const_hweight.h >/dev/null) \
+        || echo "Warning: tools/include/asm-generic/bitops/const_hweight.h differs from kernel" >&2 )) || true
+       @(test -f ../../include/asm-generic/bitops/__fls.h && ( \
+        (diff -B ../include/asm-generic/bitops/__fls.h ../../include/asm-generic/bitops/__fls.h >/dev/null) \
+        || echo "Warning: tools/include/asm-generic/bitops/__fls.h differs from kernel" >&2 )) || true
+       @(test -f ../../include/asm-generic/bitops/fls.h && ( \
+        (diff -B ../include/asm-generic/bitops/fls.h ../../include/asm-generic/bitops/fls.h >/dev/null) \
+        || echo "Warning: tools/include/asm-generic/bitops/fls.h differs from kernel" >&2 )) || true
+       @(test -f ../../include/asm-generic/bitops/fls64.h && ( \
+        (diff -B ../include/asm-generic/bitops/fls64.h ../../include/asm-generic/bitops/fls64.h >/dev/null) \
+        || echo "Warning: tools/include/asm-generic/bitops/fls64.h differs from kernel" >&2 )) || true
        $(Q)$(MAKE) $(build)=perf
 
 $(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
index d22e3d07de3d69b825155218ae0a46b9d257b47e..f98da17357c0ed19e76f66b194bf81323dc21fc5 100644 (file)
@@ -1,4 +1,4 @@
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 
-libperf-$(CONFIG_LIBUNWIND)          += unwind-libunwind.o
+libperf-$(CONFIG_LOCAL_LIBUNWIND)    += unwind-libunwind.o
 libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
index e58123a8912b8a70eb96432aa4e6cc7444cc8e37..02f41dba4f4fa75239bf395ebf037f327c665ee2 100644 (file)
@@ -1,2 +1,2 @@
 libperf-$(CONFIG_DWARF)     += dwarf-regs.o
-libperf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o
+libperf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
index a87afa91a99ebf31df4839872606b99d7137c486..c116b713f7f773e296ff9485ff1f2ce6046c6add 100644 (file)
@@ -1,11 +1,13 @@
 
+#ifndef REMOTE_UNWIND_LIBUNWIND
 #include <errno.h>
 #include <libunwind.h>
 #include "perf_regs.h"
 #include "../../util/unwind.h"
 #include "../../util/debug.h"
+#endif
 
-int libunwind__arch_reg_id(int regnum)
+int LIBUNWIND__ARCH_REG_ID(int regnum)
 {
        switch (regnum) {
        case UNW_AARCH64_X0:
index e83c8ce243039c12591781c1eea645fd2e341b2d..886dd2aaff0d81533468ad60db70abbd23465ec3 100644 (file)
@@ -1,6 +1,7 @@
 #include <stdio.h>
 #include <sys/utsname.h>
 #include "common.h"
+#include "../util/util.h"
 #include "../util/debug.h"
 
 const char *const arm_triplets[] = {
@@ -9,34 +10,44 @@ const char *const arm_triplets[] = {
        "arm-unknown-linux-",
        "arm-unknown-linux-gnu-",
        "arm-unknown-linux-gnueabi-",
+       "arm-linux-gnu-",
+       "arm-linux-gnueabihf-",
+       "arm-none-eabi-",
        NULL
 };
 
 const char *const arm64_triplets[] = {
        "aarch64-linux-android-",
+       "aarch64-linux-gnu-",
        NULL
 };
 
 const char *const powerpc_triplets[] = {
        "powerpc-unknown-linux-gnu-",
        "powerpc64-unknown-linux-gnu-",
+       "powerpc64-linux-gnu-",
+       "powerpc64le-linux-gnu-",
        NULL
 };
 
 const char *const s390_triplets[] = {
        "s390-ibm-linux-",
+       "s390x-linux-gnu-",
        NULL
 };
 
 const char *const sh_triplets[] = {
        "sh-unknown-linux-gnu-",
        "sh64-unknown-linux-gnu-",
+       "sh-linux-gnu-",
+       "sh64-linux-gnu-",
        NULL
 };
 
 const char *const sparc_triplets[] = {
        "sparc-unknown-linux-gnu-",
        "sparc64-unknown-linux-gnu-",
+       "sparc64-linux-gnu-",
        NULL
 };
 
@@ -49,12 +60,19 @@ const char *const x86_triplets[] = {
        "i386-pc-linux-gnu-",
        "i686-linux-android-",
        "i686-android-linux-",
+       "x86_64-linux-gnu-",
+       "i586-linux-gnu-",
        NULL
 };
 
 const char *const mips_triplets[] = {
        "mips-unknown-linux-gnu-",
        "mipsel-linux-android-",
+       "mips-linux-gnu-",
+       "mips64-linux-gnu-",
+       "mips64el-linux-gnuabi64-",
+       "mips64-linux-gnuabi64-",
+       "mipsel-linux-gnu-",
        NULL
 };
 
@@ -102,7 +120,7 @@ static int lookup_triplets(const char *const *triplets, const char *name)
  * Return architecture name in a normalized form.
  * The conversion logic comes from the Makefile.
  */
-static const char *normalize_arch(char *arch)
+const char *normalize_arch(char *arch)
 {
        if (!strcmp(arch, "x86_64"))
                return "x86";
index 7529cfb143cecf0aaf74dbaaf277f3fe58e871e4..6b01c736b7d9aad5d8329776636a9c77d4344cc0 100644 (file)
@@ -6,5 +6,6 @@
 extern const char *objdump_path;
 
 int perf_env__lookup_objdump(struct perf_env *env);
+const char *normalize_arch(char *arch);
 
 #endif /* ARCH_PERF_COMMON_H */
index cac6d17ce5db000ea008d63d5905acbd6fea21e0..555263e385c9210af5f70e08dd27871005c5a865 100644 (file)
 543    x32     io_setup                compat_sys_io_setup
 544    x32     io_submit               compat_sys_io_submit
 545    x32     execveat                compat_sys_execveat/ptregs
+534    x32     preadv2                 compat_sys_preadv2
+535    x32     pwritev2                compat_sys_pwritev2
index d4aa567a29c4685ece1fb142577322ba9e57c890..5c76cc83186a24ae0df5f3986a922dc75e4ff959 100644 (file)
@@ -154,10 +154,6 @@ next_event:
        err = 0;
 
 out_err:
-       if (evlist) {
-               perf_evlist__disable(evlist);
-               perf_evlist__delete(evlist);
-       }
-
+       perf_evlist__delete(evlist);
        return err;
 }
index 72193f19d6d75d3c32e19b424a0f1910ebbc24a6..500cf96db9790b66dfc54a726316a83583c4a24a 100644 (file)
@@ -1,12 +1,16 @@
+#include <errno.h>
 #include <unistd.h>
 #include <stdlib.h>
 #include <signal.h>
 #include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
 #include <linux/types.h>
 #include "perf.h"
 #include "debug.h"
 #include "tests/tests.h"
 #include "cloexec.h"
+#include "util.h"
 #include "arch-tests.h"
 
 static u64 rdpmc(unsigned int counter)
@@ -111,14 +115,14 @@ static int __test__rdpmc(void)
        if (fd < 0) {
                pr_err("Error: sys_perf_event_open() syscall returned "
                       "with %d (%s)\n", fd,
-                      strerror_r(errno, sbuf, sizeof(sbuf)));
+                      str_error_r(errno, sbuf, sizeof(sbuf)));
                return -1;
        }
 
        addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
        if (addr == (void *)(-1)) {
                pr_err("Error: mmap() syscall returned with (%s)\n",
-                      strerror_r(errno, sbuf, sizeof(sbuf)));
+                      str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_close;
        }
 
index 465970370f3ed4db72ca525ae5f56f55150d7b12..f95e6f46ef0dc64fdf24f41912ad325e70d6be1e 100644 (file)
@@ -3,11 +3,12 @@ libperf-y += tsc.o
 libperf-y += pmu.o
 libperf-y += kvm-stat.o
 libperf-y += perf_regs.o
+libperf-y += group.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 libperf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
 
-libperf-$(CONFIG_LIBUNWIND)          += unwind-libunwind.o
+libperf-$(CONFIG_LOCAL_LIBUNWIND)    += unwind-libunwind.o
 libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
 
 libperf-$(CONFIG_AUXTRACE) += auxtrace.o
index 7a7805583e3fe4f40bba1b3435d9c149ea3adebd..cc1d865e31f1b7975578343761104db81398e8de 100644 (file)
@@ -37,7 +37,7 @@ struct auxtrace_record *auxtrace_record__init_intel(struct perf_evlist *evlist,
        intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
 
        if (evlist) {
-               evlist__for_each(evlist, evsel) {
+               evlist__for_each_entry(evlist, evsel) {
                        if (intel_pt_pmu &&
                            evsel->attr.type == intel_pt_pmu->type)
                                found_pt = true;
diff --git a/tools/perf/arch/x86/util/group.c b/tools/perf/arch/x86/util/group.c
new file mode 100644 (file)
index 0000000..37f92aa
--- /dev/null
@@ -0,0 +1,27 @@
+#include <stdio.h>
+#include "api/fs/fs.h"
+#include "util/group.h"
+
+/*
+ * Check whether we can use a group for top down.
+ * Without a group may get bad results due to multiplexing.
+ */
+bool arch_topdown_check_group(bool *warn)
+{
+       int n;
+
+       if (sysctl__read_int("kernel/nmi_watchdog", &n) < 0)
+               return false;
+       if (n > 0) {
+               *warn = true;
+               return false;
+       }
+       return true;
+}
+
+void arch_topdown_group_warn(void)
+{
+       fprintf(stderr,
+               "nmi_watchdog enabled with topdown. May give wrong results.\n"
+               "Disable with echo 0 > /proc/sys/kernel/nmi_watchdog\n");
+}
index 7dc30637cf66f4957dd6608a9b3a145a0e81a487..5132775a044fd7b1acf2a6c814567f56c7e03b3b 100644 (file)
@@ -124,7 +124,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
        btsr->evlist = evlist;
        btsr->snapshot_mode = opts->auxtrace_snapshot_mode;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type == intel_bts_pmu->type) {
                        if (intel_bts_evsel) {
                                pr_err("There may be only one " INTEL_BTS_PMU_NAME " event\n");
@@ -327,7 +327,7 @@ static int intel_bts_snapshot_start(struct auxtrace_record *itr)
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_evsel *evsel;
 
-       evlist__for_each(btsr->evlist, evsel) {
+       evlist__for_each_entry(btsr->evlist, evsel) {
                if (evsel->attr.type == btsr->intel_bts_pmu->type)
                        return perf_evsel__disable(evsel);
        }
@@ -340,7 +340,7 @@ static int intel_bts_snapshot_finish(struct auxtrace_record *itr)
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_evsel *evsel;
 
-       evlist__for_each(btsr->evlist, evsel) {
+       evlist__for_each_entry(btsr->evlist, evsel) {
                if (evsel->attr.type == btsr->intel_bts_pmu->type)
                        return perf_evsel__enable(evsel);
        }
@@ -422,7 +422,7 @@ static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
                        container_of(itr, struct intel_bts_recording, itr);
        struct perf_evsel *evsel;
 
-       evlist__for_each(btsr->evlist, evsel) {
+       evlist__for_each_entry(btsr->evlist, evsel) {
                if (evsel->attr.type == btsr->intel_bts_pmu->type)
                        return perf_evlist__enable_event_idx(btsr->evlist,
                                                             evsel, idx);
index a07b9605e93b3f1c7c746f5296229c20f8f3767a..fb51457ba338f8c1a3dc549582213eaf5446ec56 100644 (file)
@@ -131,7 +131,7 @@ static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
        if (!mask)
                return -EINVAL;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type == intel_pt_pmu->type) {
                        *res = intel_pt_masked_bits(mask, evsel->attr.config);
                        return 0;
@@ -511,7 +511,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
        ptr->evlist = evlist;
        ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type == intel_pt_pmu->type) {
                        if (intel_pt_evsel) {
                                pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
@@ -725,7 +725,7 @@ static int intel_pt_snapshot_start(struct auxtrace_record *itr)
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_evsel *evsel;
 
-       evlist__for_each(ptr->evlist, evsel) {
+       evlist__for_each_entry(ptr->evlist, evsel) {
                if (evsel->attr.type == ptr->intel_pt_pmu->type)
                        return perf_evsel__disable(evsel);
        }
@@ -738,7 +738,7 @@ static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_evsel *evsel;
 
-       evlist__for_each(ptr->evlist, evsel) {
+       evlist__for_each_entry(ptr->evlist, evsel) {
                if (evsel->attr.type == ptr->intel_pt_pmu->type)
                        return perf_evsel__enable(evsel);
        }
@@ -1011,7 +1011,7 @@ static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
                        container_of(itr, struct intel_pt_recording, itr);
        struct perf_evsel *evsel;
 
-       evlist__for_each(ptr->evlist, evsel) {
+       evlist__for_each_entry(ptr->evlist, evsel) {
                if (evsel->attr.type == ptr->intel_pt_pmu->type)
                        return perf_evlist__enable_event_idx(ptr->evlist, evsel,
                                                             idx);
index 357f1b13b5ae3e585aa609e303e08bd6f344d546..2e5567c94e09430f86db124a9a0fa1a05799943d 100644 (file)
@@ -62,6 +62,8 @@ int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
        struct perf_tsc_conversion tc;
        int err;
 
+       if (!pc)
+               return 0;
        err = perf_read_tsc_conversion(pc, &tc);
        if (err == -EOPNOTSUPP)
                return 0;
index db25e93d989ced4f74f2f9b61b56a00ea1e429b6..4f16661cbdbb507a34390df99202e5a73431ea5e 100644 (file)
@@ -1,12 +1,14 @@
 
+#ifndef REMOTE_UNWIND_LIBUNWIND
 #include <errno.h>
 #include <libunwind.h>
 #include "perf_regs.h"
 #include "../../util/unwind.h"
 #include "../../util/debug.h"
+#endif
 
 #ifdef HAVE_ARCH_X86_64_SUPPORT
-int libunwind__arch_reg_id(int regnum)
+int LIBUNWIND__ARCH_REG_ID(int regnum)
 {
        int id;
 
@@ -70,7 +72,7 @@ int libunwind__arch_reg_id(int regnum)
        return id;
 }
 #else
-int libunwind__arch_reg_id(int regnum)
+int LIBUNWIND__ARCH_REG_ID(int regnum)
 {
        int id;
 
index 0999ac536d869c57b0dcb02505148698b9470bd5..8024cd5febd226da1e3bfe99003f8eb217bdfc69 100644 (file)
@@ -8,18 +8,23 @@
  * many threads and futexes as possible.
  */
 
-#include "../perf.h"
-#include "../util/util.h"
+/* For the CLR_() macros */
+#include <pthread.h>
+
+#include <errno.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <sys/time.h>
+
 #include "../util/stat.h"
 #include <subcmd/parse-options.h>
-#include "../util/header.h"
 #include "bench.h"
 #include "futex.h"
 
 #include <err.h>
-#include <stdlib.h>
 #include <sys/time.h>
-#include <pthread.h>
 
 static unsigned int nthreads = 0;
 static unsigned int nsecs    = 10;
index 6952db65508abced08317b18e8262cc72881b32b..936d89d3048354adea0b182d6223fb6be119d9ce 100644 (file)
@@ -2,18 +2,21 @@
  * Copyright (C) 2015 Davidlohr Bueso.
  */
 
-#include "../perf.h"
-#include "../util/util.h"
+/* For the CLR_() macros */
+#include <pthread.h>
+
+#include <signal.h>
 #include "../util/stat.h"
 #include <subcmd/parse-options.h>
-#include "../util/header.h"
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <errno.h>
 #include "bench.h"
 #include "futex.h"
 
 #include <err.h>
 #include <stdlib.h>
 #include <sys/time.h>
-#include <pthread.h>
 
 struct worker {
        int tid;
index 71823868301347a47fdc7d31723dd325426ac3a5..f96e22ed9f873de6aada5bb9a3e5ac0fdc902b35 100644 (file)
@@ -8,18 +8,21 @@
  * requeues without waking up any tasks -- thus mimicking a regular futex_wait.
  */
 
-#include "../perf.h"
-#include "../util/util.h"
+/* For the CLR_() macros */
+#include <pthread.h>
+
+#include <signal.h>
 #include "../util/stat.h"
 #include <subcmd/parse-options.h>
-#include "../util/header.h"
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <errno.h>
 #include "bench.h"
 #include "futex.h"
 
 #include <err.h>
 #include <stdlib.h>
 #include <sys/time.h>
-#include <pthread.h>
 
 static u_int32_t futex1 = 0, futex2 = 0;
 
index 91aaf2a1fa9050dae0a866e6a851777a87c37a0b..4a2ecd7438ca0ffeb3f9e354e1a24668f06db338 100644 (file)
@@ -7,18 +7,21 @@
  * it can be used to measure futex_wake() changes.
  */
 
-#include "../perf.h"
-#include "../util/util.h"
+/* For the CLR_() macros */
+#include <pthread.h>
+
+#include <signal.h>
 #include "../util/stat.h"
 #include <subcmd/parse-options.h>
-#include "../util/header.h"
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <errno.h>
 #include "bench.h"
 #include "futex.h"
 
 #include <err.h>
 #include <stdlib.h>
 #include <sys/time.h>
-#include <pthread.h>
 
 struct thread_data {
        pthread_t worker;
index f416bd705f661f056da8e1424643922f1999600d..87d8f4f292d95bfa715c8c2408a69a712c33da78 100644 (file)
@@ -8,18 +8,21 @@
  * one or more tasks, and thus the waitqueue is never empty.
  */
 
-#include "../perf.h"
-#include "../util/util.h"
+/* For the CLR_() macros */
+#include <pthread.h>
+
+#include <signal.h>
 #include "../util/stat.h"
 #include <subcmd/parse-options.h>
-#include "../util/header.h"
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <errno.h>
 #include "bench.h"
 #include "futex.h"
 
 #include <err.h>
 #include <stdlib.h>
 #include <sys/time.h>
-#include <pthread.h>
 
 /* all threads will block on the same futex */
 static u_int32_t futex1 = 0;
index 5c3cce082cb88c5cab984bdb1ae6944274861ad4..f700369bb0f6ed181b3ccdbd60ce713283776ccc 100644 (file)
@@ -6,7 +6,7 @@
 #define globl p2align 4; .globl
 #define _ASM_EXTABLE_FAULT(x, y)
 
-#include "../../../arch/x86/lib/memcpy_64.S"
+#include "../../arch/x86/lib/memcpy_64.S"
 /*
  * We need to provide note.GNU-stack section, saying that we want
  * NOT executable stack. Otherwise the final linking will assume that
index de278784c866a3804040408454f38c4a4f7ca44e..58407aa24c1bfdde178f299d9d6ccb6b0f75265b 100644 (file)
@@ -1,7 +1,7 @@
 #define memset MEMSET /* don't hide glibc's memset() */
 #define altinstr_replacement text
 #define globl p2align 4; .globl
-#include "../../../arch/x86/lib/memset_64.S"
+#include "../../arch/x86/lib/memset_64.S"
 
 /*
  * We need to provide note.GNU-stack section, saying that we want
index 7500d959d7eb0002e4d5da138e6b8365bc51db8e..f7f530081aa9421a0e01ae362fa02c3192c2896c 100644 (file)
@@ -4,6 +4,9 @@
  * numa: Simulate NUMA-sensitive workload and measure their NUMA performance
  */
 
+/* For the CLR_() macros */
+#include <pthread.h>
+
 #include "../perf.h"
 #include "../builtin.h"
 #include "../util/util.h"
@@ -21,7 +24,6 @@
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
-#include <pthread.h>
 #include <sys/mman.h>
 #include <sys/time.h>
 #include <sys/resource.h>
index 25c81734a9505604d8056ef897dd64975998ab66..9c1034d81b4fe3cc72d4b3b09ac8d527d07b0da0 100644 (file)
@@ -75,7 +75,7 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
        sample->period = 1;
        sample->weight = 1;
 
-       he = __hists__add_entry(hists, al, NULL, NULL, NULL, sample, true);
+       he = hists__add_entry(hists, al, NULL, NULL, NULL, sample, true);
        if (he == NULL)
                return -ENOMEM;
 
@@ -236,7 +236,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
                perf_session__fprintf_dsos(session, stdout);
 
        total_nr_samples = 0;
-       evlist__for_each(session->evlist, pos) {
+       evlist__for_each_entry(session->evlist, pos) {
                struct hists *hists = evsel__hists(pos);
                u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
 
@@ -339,6 +339,9 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
                    "Show event group information together"),
        OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
                    "Show a column with the sum of periods"),
+       OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
+                            "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
+                            stdio__config_color, "always"),
        OPT_END()
        };
        int ret = hists__init();
index d75bded21fe0ce73ef9aea745ff565c9ba61a204..30e2b2cb2421219de6cc168483f5e9144af8036c 100644 (file)
@@ -209,7 +209,7 @@ static int build_id_cache__purge_path(const char *pathname)
        if (err)
                goto out;
 
-       strlist__for_each(pos, list) {
+       strlist__for_each_entry(pos, list) {
                err = build_id_cache__remove_s(pos->s);
                pr_debug("Removing %s %s: %s\n", pos->s, pathname,
                         err ? "FAIL" : "Ok");
@@ -343,7 +343,7 @@ int cmd_buildid_cache(int argc, const char **argv,
        if (add_name_list_str) {
                list = strlist__new(add_name_list_str, NULL);
                if (list) {
-                       strlist__for_each(pos, list)
+                       strlist__for_each_entry(pos, list)
                                if (build_id_cache__add_file(pos->s)) {
                                        if (errno == EEXIST) {
                                                pr_debug("%s already in the cache\n",
@@ -351,7 +351,7 @@ int cmd_buildid_cache(int argc, const char **argv,
                                                continue;
                                        }
                                        pr_warning("Couldn't add %s: %s\n",
-                                                  pos->s, strerror_r(errno, sbuf, sizeof(sbuf)));
+                                                  pos->s, str_error_r(errno, sbuf, sizeof(sbuf)));
                                }
 
                        strlist__delete(list);
@@ -361,7 +361,7 @@ int cmd_buildid_cache(int argc, const char **argv,
        if (remove_name_list_str) {
                list = strlist__new(remove_name_list_str, NULL);
                if (list) {
-                       strlist__for_each(pos, list)
+                       strlist__for_each_entry(pos, list)
                                if (build_id_cache__remove_file(pos->s)) {
                                        if (errno == ENOENT) {
                                                pr_debug("%s wasn't in the cache\n",
@@ -369,7 +369,7 @@ int cmd_buildid_cache(int argc, const char **argv,
                                                continue;
                                        }
                                        pr_warning("Couldn't remove %s: %s\n",
-                                                  pos->s, strerror_r(errno, sbuf, sizeof(sbuf)));
+                                                  pos->s, str_error_r(errno, sbuf, sizeof(sbuf)));
                                }
 
                        strlist__delete(list);
@@ -379,7 +379,7 @@ int cmd_buildid_cache(int argc, const char **argv,
        if (purge_name_list_str) {
                list = strlist__new(purge_name_list_str, NULL);
                if (list) {
-                       strlist__for_each(pos, list)
+                       strlist__for_each_entry(pos, list)
                                if (build_id_cache__purge_path(pos->s)) {
                                        if (errno == ENOENT) {
                                                pr_debug("%s wasn't in the cache\n",
@@ -387,7 +387,7 @@ int cmd_buildid_cache(int argc, const char **argv,
                                                continue;
                                        }
                                        pr_warning("Couldn't remove %s: %s\n",
-                                                  pos->s, strerror_r(errno, sbuf, sizeof(sbuf)));
+                                                  pos->s, str_error_r(errno, sbuf, sizeof(sbuf)));
                                }
 
                        strlist__delete(list);
@@ -400,7 +400,7 @@ int cmd_buildid_cache(int argc, const char **argv,
        if (update_name_list_str) {
                list = strlist__new(update_name_list_str, NULL);
                if (list) {
-                       strlist__for_each(pos, list)
+                       strlist__for_each_entry(pos, list)
                                if (build_id_cache__update_file(pos->s)) {
                                        if (errno == ENOENT) {
                                                pr_debug("%s wasn't in the cache\n",
@@ -408,7 +408,7 @@ int cmd_buildid_cache(int argc, const char **argv,
                                                continue;
                                        }
                                        pr_warning("Couldn't update %s: %s\n",
-                                                  pos->s, strerror_r(errno, sbuf, sizeof(sbuf)));
+                                                  pos->s, str_error_r(errno, sbuf, sizeof(sbuf)));
                                }
 
                        strlist__delete(list);
@@ -419,8 +419,7 @@ int cmd_buildid_cache(int argc, const char **argv,
                pr_warning("Couldn't add %s\n", kcore_filename);
 
 out:
-       if (session)
-               perf_session__delete(session);
+       perf_session__delete(session);
 
        return ret;
 }
index fe1b77fa21f91c409666f2fd9ede2f2df82a1d76..e4207a23b52c0924e1d5c119f888467f7e9f1150 100644 (file)
@@ -37,23 +37,16 @@ static int show_config(struct perf_config_set *set)
 {
        struct perf_config_section *section;
        struct perf_config_item *item;
-       struct list_head *sections;
 
        if (set == NULL)
                return -1;
 
-       sections = &set->sections;
-       if (list_empty(sections))
-               return -1;
-
-       list_for_each_entry(section, sections, node) {
-               list_for_each_entry(item, &section->items, node) {
-                       char *value = item->value;
+       perf_config_set__for_each_entry(set, section, item) {
+               char *value = item->value;
 
-                       if (value)
-                               printf("%s.%s=%s\n", section->name,
-                                      item->name, value);
-               }
+               if (value)
+                       printf("%s.%s=%s\n", section->name,
+                              item->name, value);
        }
 
        return 0;
@@ -80,6 +73,10 @@ int cmd_config(int argc, const char **argv, const char *prefix __maybe_unused)
        else if (use_user_config)
                config_exclusive_filename = user_config;
 
+       /*
+        * At only 'config' sub-command, individually use the config set
+        * because of reinitializing with options config file location.
+        */
        set = perf_config_set__new();
        if (!set) {
                ret = -1;
index b97bc1518b44a5f7ffb85eeefa7f6ffae95e74f1..7ad6e17ac6b362cadb02e453d36ce5bb288ff2e1 100644 (file)
@@ -3,6 +3,7 @@
 #include "perf.h"
 #include "debug.h"
 #include <subcmd/parse-options.h>
+#include "data-convert.h"
 #include "data-convert-bt.h"
 
 typedef int (*data_cmd_fn_t)(int argc, const char **argv, const char *prefix);
@@ -53,14 +54,18 @@ static int cmd_data_convert(int argc, const char **argv,
                            const char *prefix __maybe_unused)
 {
        const char *to_ctf     = NULL;
-       bool force = false;
+       struct perf_data_convert_opts opts = {
+               .force = false,
+               .all = false,
+       };
        const struct option options[] = {
                OPT_INCR('v', "verbose", &verbose, "be more verbose"),
                OPT_STRING('i', "input", &input_name, "file", "input file name"),
 #ifdef HAVE_LIBBABELTRACE_SUPPORT
                OPT_STRING(0, "to-ctf", &to_ctf, NULL, "Convert to CTF format"),
 #endif
-               OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+               OPT_BOOLEAN('f', "force", &opts.force, "don't complain, do it"),
+               OPT_BOOLEAN(0, "all", &opts.all, "Convert all events"),
                OPT_END()
        };
 
@@ -78,7 +83,7 @@ static int cmd_data_convert(int argc, const char **argv,
 
        if (to_ctf) {
 #ifdef HAVE_LIBBABELTRACE_SUPPORT
-               return bt_convert__perf2ctf(input_name, to_ctf, force);
+               return bt_convert__perf2ctf(input_name, to_ctf, &opts);
 #else
                pr_err("The libbabeltrace support is not compiled in.\n");
                return -1;
index f7645a42708eb2223069b3555df0a325c2c635d8..21ee753211adfa954b9e25fb1df9be8354dfc8a5 100644 (file)
@@ -310,16 +310,6 @@ static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
        return -1;
 }
 
-static int hists__add_entry(struct hists *hists,
-                           struct addr_location *al,
-                           struct perf_sample *sample)
-{
-       if (__hists__add_entry(hists, al, NULL, NULL, NULL,
-                              sample, true) != NULL)
-               return 0;
-       return -ENOMEM;
-}
-
 static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
                                      union perf_event *event,
                                      struct perf_sample *sample,
@@ -336,7 +326,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
                return -1;
        }
 
-       if (hists__add_entry(hists, &al, sample)) {
+       if (!hists__add_entry(hists, &al, NULL, NULL, NULL, sample, true)) {
                pr_warning("problem incrementing symbol period, skipping event\n");
                goto out_put;
        }
@@ -373,7 +363,7 @@ static struct perf_evsel *evsel_match(struct perf_evsel *evsel,
 {
        struct perf_evsel *e;
 
-       evlist__for_each(evlist, e) {
+       evlist__for_each_entry(evlist, e) {
                if (perf_evsel__match2(evsel, e))
                        return e;
        }
@@ -385,7 +375,7 @@ static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                struct hists *hists = evsel__hists(evsel);
 
                hists__collapse_resort(hists, NULL);
@@ -666,7 +656,8 @@ static void hists__process(struct hists *hists)
        hists__precompute(hists);
        hists__output_resort(hists, NULL);
 
-       hists__fprintf(hists, true, 0, 0, 0, stdout);
+       hists__fprintf(hists, true, 0, 0, 0, stdout,
+                      symbol_conf.use_callchain);
 }
 
 static void data__fprintf(void)
@@ -690,7 +681,7 @@ static void data_process(void)
        struct perf_evsel *evsel_base;
        bool first = true;
 
-       evlist__for_each(evlist_base, evsel_base) {
+       evlist__for_each_entry(evlist_base, evsel_base) {
                struct hists *hists_base = evsel__hists(evsel_base);
                struct data__file *d;
                int i;
@@ -765,9 +756,7 @@ static int __cmd_diff(void)
 
  out_delete:
        data__for_each_file(i, d) {
-               if (d->session)
-                       perf_session__delete(d->session);
-
+               perf_session__delete(d->session);
                data__free(d);
        }
 
@@ -1044,7 +1033,7 @@ static int hpp__entry_global(struct perf_hpp_fmt *_fmt, struct perf_hpp *hpp,
 }
 
 static int hpp__header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
-                      struct perf_evsel *evsel __maybe_unused)
+                      struct hists *hists __maybe_unused)
 {
        struct diff_hpp_fmt *dfmt =
                container_of(fmt, struct diff_hpp_fmt, fmt);
@@ -1055,7 +1044,7 @@ static int hpp__header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
 
 static int hpp__width(struct perf_hpp_fmt *fmt,
                      struct perf_hpp *hpp __maybe_unused,
-                     struct perf_evsel *evsel __maybe_unused)
+                     struct hists *hists __maybe_unused)
 {
        struct diff_hpp_fmt *dfmt =
                container_of(fmt, struct diff_hpp_fmt, fmt);
index 8a31f511e1a0d79b9784387fa7b01db42ac3094e..e09c4287fe87435d200e20f9f216de9aa71129c7 100644 (file)
@@ -32,7 +32,7 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
        if (session == NULL)
                return -1;
 
-       evlist__for_each(session->evlist, pos) {
+       evlist__for_each_entry(session->evlist, pos) {
                perf_evsel__fprintf(pos, details, stdout);
 
                if (pos->attr.type == PERF_TYPE_TRACEPOINT)
index f9830c902b78c6ea59b723d69fa3cb9651a90954..3bdb2c78a21b3f0118d0c7a376222b7bb96c4735 100644 (file)
@@ -4,7 +4,7 @@
  * Builtin help command
  */
 #include "perf.h"
-#include "util/cache.h"
+#include "util/config.h"
 #include "builtin.h"
 #include <subcmd/exec-cmd.h>
 #include "common-cmds.h"
@@ -117,7 +117,7 @@ static void exec_woman_emacs(const char *path, const char *page)
                        free(man_page);
                }
                warning("failed to exec '%s': %s", path,
-                       strerror_r(errno, sbuf, sizeof(sbuf)));
+                       str_error_r(errno, sbuf, sizeof(sbuf)));
        }
 }
 
@@ -150,7 +150,7 @@ static void exec_man_konqueror(const char *path, const char *page)
                        free(man_page);
                }
                warning("failed to exec '%s': %s", path,
-                       strerror_r(errno, sbuf, sizeof(sbuf)));
+                       str_error_r(errno, sbuf, sizeof(sbuf)));
        }
 }
 
@@ -162,7 +162,7 @@ static void exec_man_man(const char *path, const char *page)
                path = "man";
        execlp(path, "man", page, NULL);
        warning("failed to exec '%s': %s", path,
-               strerror_r(errno, sbuf, sizeof(sbuf)));
+               str_error_r(errno, sbuf, sizeof(sbuf)));
 }
 
 static void exec_man_cmd(const char *cmd, const char *page)
@@ -175,7 +175,7 @@ static void exec_man_cmd(const char *cmd, const char *page)
                free(shell_cmd);
        }
        warning("failed to exec '%s': %s", cmd,
-               strerror_r(errno, sbuf, sizeof(sbuf)));
+               str_error_r(errno, sbuf, sizeof(sbuf)));
 }
 
 static void add_man_viewer(const char *name)
index e5afa8fe1bf1125ab577ea090befa42dbe64760c..73c1c4cc36009d79a3f33c80db6e4834dc21b81a 100644 (file)
@@ -562,7 +562,7 @@ static void strip_init(struct perf_inject *inject)
 
        inject->tool.context_switch = perf_event__drop;
 
-       evlist__for_each(evlist, evsel)
+       evlist__for_each_entry(evlist, evsel)
                evsel->handler = drop_sample;
 }
 
@@ -590,7 +590,7 @@ static bool ok_to_remove(struct perf_evlist *evlist,
        if (!has_tracking(evsel_to_remove))
                return true;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->handler != drop_sample) {
                        cnt += 1;
                        if ((evsel->attr.sample_type & COMPAT_MASK) ==
@@ -608,7 +608,7 @@ static void strip_fini(struct perf_inject *inject)
        struct perf_evsel *evsel, *tmp;
 
        /* Remove non-synthesized evsels if possible */
-       evlist__for_each_safe(evlist, tmp, evsel) {
+       evlist__for_each_entry_safe(evlist, tmp, evsel) {
                if (evsel->handler == drop_sample &&
                    ok_to_remove(evlist, evsel)) {
                        pr_debug("Deleting %s\n", perf_evsel__name(evsel));
@@ -643,7 +643,7 @@ static int __cmd_inject(struct perf_inject *inject)
        } else if (inject->sched_stat) {
                struct perf_evsel *evsel;
 
-               evlist__for_each(session->evlist, evsel) {
+               evlist__for_each_entry(session->evlist, evsel) {
                        const char *name = perf_evsel__name(evsel);
 
                        if (!strcmp(name, "sched:sched_switch")) {
index 58adfee230de8c2c2d36b5692b1cc668c1af5f01..b1d491c2e7047c5ba7ef1dac4f86db697f8a4c19 100644 (file)
@@ -4,7 +4,7 @@
 #include "util/evlist.h"
 #include "util/evsel.h"
 #include "util/util.h"
-#include "util/cache.h"
+#include "util/config.h"
 #include "util/symbol.h"
 #include "util/thread.h"
 #include "util/header.h"
@@ -1354,7 +1354,7 @@ static int __cmd_kmem(struct perf_session *session)
                goto out;
        }
 
-       evlist__for_each(session->evlist, evsel) {
+       evlist__for_each_entry(session->evlist, evsel) {
                if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
                    perf_evsel__field(evsel, "pfn")) {
                        use_pfn = true;
index 6487c06d270853fdf3c0e3a101f821d5692871a6..5e2127e04f8386c4aa92b62e0370afff08d9e9b5 100644 (file)
@@ -988,7 +988,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
         * Note: exclude_{guest,host} do not apply here.
         *       This command processes KVM tracepoints from host only
         */
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                struct perf_event_attr *attr = &pos->attr;
 
                /* make sure these *are* set */
@@ -1018,13 +1018,13 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
        err = perf_evlist__open(evlist);
        if (err < 0) {
                printf("Couldn't create the events: %s\n",
-                      strerror_r(errno, sbuf, sizeof(sbuf)));
+                      str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out;
        }
 
        if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages, false) < 0) {
                ui__error("Failed to mmap the events: %s\n",
-                         strerror_r(errno, sbuf, sizeof(sbuf)));
+                         str_error_r(errno, sbuf, sizeof(sbuf)));
                perf_evlist__close(evlist);
                goto out;
        }
@@ -1426,11 +1426,9 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
        err = kvm_events_live_report(kvm);
 
 out:
-       if (kvm->session)
-               perf_session__delete(kvm->session);
+       perf_session__delete(kvm->session);
        kvm->session = NULL;
-       if (kvm->evlist)
-               perf_evlist__delete(kvm->evlist);
+       perf_evlist__delete(kvm->evlist);
 
        return err;
 }
index 5e22db4684b86a7a226c5d070594f160f2bc9237..88ee419e518925de5523fa08899e6935447630c3 100644 (file)
@@ -25,7 +25,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                OPT_END()
        };
        const char * const list_usage[] = {
-               "perf list [hw|sw|cache|tracepoint|pmu|event_glob]",
+               "perf list [hw|sw|cache|tracepoint|pmu|sdt|event_glob]",
                NULL
        };
 
@@ -62,6 +62,8 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                        print_hwcache_events(NULL, raw_dump);
                else if (strcmp(argv[i], "pmu") == 0)
                        print_pmu_events(NULL, raw_dump);
+               else if (strcmp(argv[i], "sdt") == 0)
+                       print_sdt_events(NULL, NULL, raw_dump);
                else if ((sep = strchr(argv[i], ':')) != NULL) {
                        int sep_idx;
 
@@ -76,6 +78,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
 
                        s[sep_idx] = '\0';
                        print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
+                       print_sdt_events(s, s + sep_idx + 1, raw_dump);
                        free(s);
                } else {
                        if (asprintf(&s, "*%s*", argv[i]) < 0) {
@@ -89,6 +92,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
                        print_hwcache_events(s, raw_dump);
                        print_pmu_events(s, raw_dump);
                        print_tracepoint_events(NULL, s, raw_dump);
+                       print_sdt_events(NULL, s, raw_dump);
                        free(s);
                }
        }
index 1dc140c5481d61a1639de2fb5bcb2e1021b0dfc5..d608a2c9e48cd219e82697bdfa9331a477e9eeed 100644 (file)
@@ -67,6 +67,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
        OPT_CALLBACK('e', "event", &mem, "event",
                     "event selector. use 'perf mem record -e list' to list available events",
                     parse_record_events),
+       OPT_UINTEGER(0, "ldlat", &perf_mem_events__loads_ldlat, "mem-loads latency"),
        OPT_INCR('v', "verbose", &verbose,
                 "be more verbose (show counter open errors, etc)"),
        OPT_BOOLEAN('U', "--all-user", &all_user, "collect only user level data"),
index 9af859b28b15cc74edbf8041278ec2ee9e47104e..ee5b42173ba374e066551f32eba24be8c863aebc 100644 (file)
@@ -44,7 +44,7 @@
 
 #define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
 #define DEFAULT_FUNC_FILTER "!_*"
-#define DEFAULT_LIST_FILTER "*:*"
+#define DEFAULT_LIST_FILTER "*"
 
 /* Session management structure */
 static struct {
@@ -308,7 +308,7 @@ static void pr_err_with_code(const char *msg, int err)
 
        pr_err("%s", msg);
        pr_debug(" Reason: %s (Code: %d)",
-                strerror_r(-err, sbuf, sizeof(sbuf)), err);
+                str_error_r(-err, sbuf, sizeof(sbuf)), err);
        pr_err("\n");
 }
 
@@ -363,6 +363,32 @@ out_cleanup:
        return ret;
 }
 
+static int del_perf_probe_caches(struct strfilter *filter)
+{
+       struct probe_cache *cache;
+       struct strlist *bidlist;
+       struct str_node *nd;
+       int ret;
+
+       bidlist = build_id_cache__list_all(false);
+       if (!bidlist) {
+               ret = -errno;
+               pr_debug("Failed to get buildids: %d\n", ret);
+               return ret ?: -ENOMEM;
+       }
+
+       strlist__for_each_entry(nd, bidlist) {
+               cache = probe_cache__new(nd->s);
+               if (!cache)
+                       continue;
+               if (probe_cache__filter_purge(cache, filter) < 0 ||
+                   probe_cache__commit(cache) < 0)
+                       pr_warning("Failed to remove entries for %s\n", nd->s);
+               probe_cache__delete(cache);
+       }
+       return 0;
+}
+
 static int perf_del_probe_events(struct strfilter *filter)
 {
        int ret, ret2, ufd = -1, kfd = -1;
@@ -375,6 +401,9 @@ static int perf_del_probe_events(struct strfilter *filter)
 
        pr_debug("Delete filter: \'%s\'\n", str);
 
+       if (probe_conf.cache)
+               return del_perf_probe_caches(filter);
+
        /* Get current event names */
        ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);
        if (ret < 0)
@@ -389,7 +418,7 @@ static int perf_del_probe_events(struct strfilter *filter)
 
        ret = probe_file__get_events(kfd, filter, klist);
        if (ret == 0) {
-               strlist__for_each(ent, klist)
+               strlist__for_each_entry(ent, klist)
                        pr_info("Removed event: %s\n", ent->s);
 
                ret = probe_file__del_strlist(kfd, klist);
@@ -399,7 +428,7 @@ static int perf_del_probe_events(struct strfilter *filter)
 
        ret2 = probe_file__get_events(ufd, filter, ulist);
        if (ret2 == 0) {
-               strlist__for_each(ent, ulist)
+               strlist__for_each_entry(ent, ulist)
                        pr_info("Removed event: %s\n", ent->s);
 
                ret2 = probe_file__del_strlist(ufd, ulist);
@@ -512,6 +541,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                    "Enable symbol demangling"),
        OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
                    "Enable kernel symbol demangling"),
+       OPT_BOOLEAN(0, "cache", &probe_conf.cache, "Manipulate probe cache"),
        OPT_END()
        };
        int ret;
index dc3fcb597e4c10cf091dcad73a53776387baab16..8f2c16d9275f303aca4c8ba3d7f148fb71a4df00 100644 (file)
@@ -13,6 +13,7 @@
 #include "util/util.h"
 #include <subcmd/parse-options.h>
 #include "util/parse-events.h"
+#include "util/config.h"
 
 #include "util/callchain.h"
 #include "util/cgroup.h"
@@ -118,11 +119,10 @@ backward_rb_find_range(void *buf, int mask, u64 head, u64 *start, u64 *end)
 }
 
 static int
-rb_find_range(struct perf_evlist *evlist,
-             void *data, int mask, u64 head, u64 old,
-             u64 *start, u64 *end)
+rb_find_range(void *data, int mask, u64 head, u64 old,
+             u64 *start, u64 *end, bool backward)
 {
-       if (!evlist->backward) {
+       if (!backward) {
                *start = old;
                *end = head;
                return 0;
@@ -131,9 +131,10 @@ rb_find_range(struct perf_evlist *evlist,
        return backward_rb_find_range(data, mask, head, start, end);
 }
 
-static int record__mmap_read(struct record *rec, int idx)
+static int
+record__mmap_read(struct record *rec, struct perf_mmap *md,
+                 bool overwrite, bool backward)
 {
-       struct perf_mmap *md = &rec->evlist->mmap[idx];
        u64 head = perf_mmap__read_head(md);
        u64 old = md->prev;
        u64 end = head, start = old;
@@ -142,8 +143,8 @@ static int record__mmap_read(struct record *rec, int idx)
        void *buf;
        int rc = 0;
 
-       if (rb_find_range(rec->evlist, data, md->mask, head,
-                         old, &start, &end))
+       if (rb_find_range(data, md->mask, head,
+                         old, &start, &end, backward))
                return -1;
 
        if (start == end)
@@ -156,7 +157,7 @@ static int record__mmap_read(struct record *rec, int idx)
                WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
 
                md->prev = head;
-               perf_evlist__mmap_consume(rec->evlist, idx);
+               perf_mmap__consume(md, overwrite || backward);
                return 0;
        }
 
@@ -181,7 +182,7 @@ static int record__mmap_read(struct record *rec, int idx)
        }
 
        md->prev = head;
-       perf_evlist__mmap_consume(rec->evlist, idx);
+       perf_mmap__consume(md, overwrite || backward);
 out:
        return rc;
 }
@@ -341,6 +342,40 @@ int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
 
 #endif
 
+static int record__mmap_evlist(struct record *rec,
+                              struct perf_evlist *evlist)
+{
+       struct record_opts *opts = &rec->opts;
+       char msg[512];
+
+       if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
+                                opts->auxtrace_mmap_pages,
+                                opts->auxtrace_snapshot_mode) < 0) {
+               if (errno == EPERM) {
+                       pr_err("Permission error mapping pages.\n"
+                              "Consider increasing "
+                              "/proc/sys/kernel/perf_event_mlock_kb,\n"
+                              "or try again with a smaller value of -m/--mmap_pages.\n"
+                              "(current value: %u,%u)\n",
+                              opts->mmap_pages, opts->auxtrace_mmap_pages);
+                       return -errno;
+               } else {
+                       pr_err("failed to mmap with %d (%s)\n", errno,
+                               str_error_r(errno, msg, sizeof(msg)));
+                       if (errno)
+                               return -errno;
+                       else
+                               return -EINVAL;
+               }
+       }
+       return 0;
+}
+
+static int record__mmap(struct record *rec)
+{
+       return record__mmap_evlist(rec, rec->evlist);
+}
+
 static int record__open(struct record *rec)
 {
        char msg[512];
@@ -352,7 +387,7 @@ static int record__open(struct record *rec)
 
        perf_evlist__config(evlist, opts, &callchain_param);
 
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
 try_again:
                if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
                        if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
@@ -372,32 +407,14 @@ try_again:
        if (perf_evlist__apply_filters(evlist, &pos)) {
                error("failed to set filter \"%s\" on event %s with %d (%s)\n",
                        pos->filter, perf_evsel__name(pos), errno,
-                       strerror_r(errno, msg, sizeof(msg)));
+                       str_error_r(errno, msg, sizeof(msg)));
                rc = -1;
                goto out;
        }
 
-       if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
-                                opts->auxtrace_mmap_pages,
-                                opts->auxtrace_snapshot_mode) < 0) {
-               if (errno == EPERM) {
-                       pr_err("Permission error mapping pages.\n"
-                              "Consider increasing "
-                              "/proc/sys/kernel/perf_event_mlock_kb,\n"
-                              "or try again with a smaller value of -m/--mmap_pages.\n"
-                              "(current value: %u,%u)\n",
-                              opts->mmap_pages, opts->auxtrace_mmap_pages);
-                       rc = -errno;
-               } else {
-                       pr_err("failed to mmap with %d (%s)\n", errno,
-                               strerror_r(errno, msg, sizeof(msg)));
-                       if (errno)
-                               rc = -errno;
-                       else
-                               rc = -EINVAL;
-               }
+       rc = record__mmap(rec);
+       if (rc)
                goto out;
-       }
 
        session->evlist = evlist;
        perf_session__set_id_hdr_size(session);
@@ -481,17 +498,30 @@ static struct perf_event_header finished_round_event = {
        .type = PERF_RECORD_FINISHED_ROUND,
 };
 
-static int record__mmap_read_all(struct record *rec)
+static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
+                                   bool backward)
 {
        u64 bytes_written = rec->bytes_written;
        int i;
        int rc = 0;
+       struct perf_mmap *maps;
 
-       for (i = 0; i < rec->evlist->nr_mmaps; i++) {
-               struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;
+       if (!evlist)
+               return 0;
 
-               if (rec->evlist->mmap[i].base) {
-                       if (record__mmap_read(rec, i) != 0) {
+       maps = backward ? evlist->backward_mmap : evlist->mmap;
+       if (!maps)
+               return 0;
+
+       if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
+               return 0;
+
+       for (i = 0; i < evlist->nr_mmaps; i++) {
+               struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
+
+               if (maps[i].base) {
+                       if (record__mmap_read(rec, &maps[i],
+                                             evlist->overwrite, backward) != 0) {
                                rc = -1;
                                goto out;
                        }
@@ -511,10 +541,23 @@ static int record__mmap_read_all(struct record *rec)
        if (bytes_written != rec->bytes_written)
                rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
 
+       if (backward)
+               perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
 out:
        return rc;
 }
 
+static int record__mmap_read_all(struct record *rec)
+{
+       int err;
+
+       err = record__mmap_read_evlist(rec, rec->evlist, false);
+       if (err)
+               return err;
+
+       return record__mmap_read_evlist(rec, rec->evlist, true);
+}
+
 static void record__init_features(struct record *rec)
 {
        struct perf_session *session = rec->session;
@@ -561,13 +604,16 @@ record__finish_output(struct record *rec)
        return;
 }
 
-static int record__synthesize_workload(struct record *rec)
+static int record__synthesize_workload(struct record *rec, bool tail)
 {
        struct {
                struct thread_map map;
                struct thread_map_data map_data;
        } thread_map;
 
+       if (rec->opts.tail_synthesize != tail)
+               return 0;
+
        thread_map.map.nr = 1;
        thread_map.map.map[0].pid = rec->evlist->workload.pid;
        thread_map.map.map[0].comm = NULL;
@@ -578,7 +624,7 @@ static int record__synthesize_workload(struct record *rec)
                                                 rec->opts.proc_map_timeout);
 }
 
-static int record__synthesize(struct record *rec);
+static int record__synthesize(struct record *rec, bool tail);
 
 static int
 record__switch_output(struct record *rec, bool at_exit)
@@ -589,6 +635,10 @@ record__switch_output(struct record *rec, bool at_exit)
        /* Same Size:      "2015122520103046"*/
        char timestamp[] = "InvalidTimestamp";
 
+       record__synthesize(rec, true);
+       if (target__none(&rec->opts.target))
+               record__synthesize_workload(rec, true);
+
        rec->samples = 0;
        record__finish_output(rec);
        err = fetch_current_timestamp(timestamp, sizeof(timestamp));
@@ -611,7 +661,7 @@ record__switch_output(struct record *rec, bool at_exit)
 
        /* Output tracking events */
        if (!at_exit) {
-               record__synthesize(rec);
+               record__synthesize(rec, false);
 
                /*
                 * In 'perf record --switch-output' without -a,
@@ -623,7 +673,7 @@ record__switch_output(struct record *rec, bool at_exit)
                 * perf_event__synthesize_thread_map() for those events.
                 */
                if (target__none(&rec->opts.target))
-                       record__synthesize_workload(rec);
+                       record__synthesize_workload(rec, false);
        }
        return fd;
 }
@@ -655,7 +705,29 @@ perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused
        return 0;
 }
 
-static int record__synthesize(struct record *rec)
+static const struct perf_event_mmap_page *
+perf_evlist__pick_pc(struct perf_evlist *evlist)
+{
+       if (evlist) {
+               if (evlist->mmap && evlist->mmap[0].base)
+                       return evlist->mmap[0].base;
+               if (evlist->backward_mmap && evlist->backward_mmap[0].base)
+                       return evlist->backward_mmap[0].base;
+       }
+       return NULL;
+}
+
+static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
+{
+       const struct perf_event_mmap_page *pc;
+
+       pc = perf_evlist__pick_pc(rec->evlist);
+       if (pc)
+               return pc;
+       return NULL;
+}
+
+static int record__synthesize(struct record *rec, bool tail)
 {
        struct perf_session *session = rec->session;
        struct machine *machine = &session->machines.host;
@@ -665,6 +737,9 @@ static int record__synthesize(struct record *rec)
        int fd = perf_data_file__fd(file);
        int err = 0;
 
+       if (rec->opts.tail_synthesize != tail)
+               return 0;
+
        if (file->is_pipe) {
                err = perf_event__synthesize_attrs(tool, session,
                                                   process_synthesized_event);
@@ -692,7 +767,7 @@ static int record__synthesize(struct record *rec)
                }
        }
 
-       err = perf_event__synth_time_conv(rec->evlist->mmap[0].base, tool,
+       err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
                                          process_synthesized_event, machine);
        if (err)
                goto out;
@@ -828,7 +903,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 
        machine = &session->machines.host;
 
-       err = record__synthesize(rec);
+       err = record__synthesize(rec, false);
        if (err < 0)
                goto out_child;
 
@@ -888,6 +963,17 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        for (;;) {
                unsigned long long hits = rec->samples;
 
+               /*
+                * rec->evlist->bkw_mmap_state is possible to be
+                * BKW_MMAP_EMPTY here: when done == true and
+                * hits != rec->samples in previous round.
+                *
+                * perf_evlist__toggle_bkw_mmap ensure we never
+                * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
+                */
+               if (trigger_is_hit(&switch_output_trigger) || done || draining)
+                       perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
+
                if (record__mmap_read_all(rec) < 0) {
                        trigger_error(&auxtrace_snapshot_trigger);
                        trigger_error(&switch_output_trigger);
@@ -907,8 +993,26 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                }
 
                if (trigger_is_hit(&switch_output_trigger)) {
+                       /*
+                        * If switch_output_trigger is hit, the data in
+                        * overwritable ring buffer should have been collected,
+                        * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
+                        *
+                        * If SIGUSR2 raise after or during record__mmap_read_all(),
+                        * record__mmap_read_all() didn't collect data from
+                        * overwritable ring buffer. Read again.
+                        */
+                       if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
+                               continue;
                        trigger_ready(&switch_output_trigger);
 
+                       /*
+                        * Reenable events in overwrite ring buffer after
+                        * record__mmap_read_all(): we should have collected
+                        * data from it.
+                        */
+                       perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
+
                        if (!quiet)
                                fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
                                        waking);
@@ -954,7 +1058,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 
        if (forks && workload_exec_errno) {
                char msg[STRERR_BUFSIZE];
-               const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
+               const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
                pr_err("Workload failed: %s\n", emsg);
                err = -1;
                goto out_child;
@@ -963,6 +1067,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        if (!quiet)
                fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
 
+       if (target__none(&rec->opts.target))
+               record__synthesize_workload(rec, true);
+
 out_child:
        if (forks) {
                int exit_status;
@@ -981,6 +1088,7 @@ out_child:
        } else
                status = err;
 
+       record__synthesize(rec, true);
        /* this will be recalculated during process_buildids() */
        rec->samples = 0;
 
@@ -1267,6 +1375,8 @@ static struct record record = {
 const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
        "\n\t\t\t\tDefault: fp";
 
+static bool dry_run;
+
 /*
  * XXX Will stay a global variable till we fix builtin-script.c to stop messing
  * with it and switch to use the library functions in perf_evlist that came
@@ -1303,6 +1413,9 @@ struct option __record_options[] = {
        OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
                        &record.opts.no_inherit_set,
                        "child tasks do not inherit counters"),
+       OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
+                   "synthesize non-sample events at the end of output"),
+       OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
        OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
        OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
                     "number of mmap data pages and AUX area tracing mmap pages",
@@ -1386,6 +1499,8 @@ struct option __record_options[] = {
                    "append timestamp to output filename"),
        OPT_BOOLEAN(0, "switch-output", &record.switch_output,
                    "Switch output when receive SIGUSR2"),
+       OPT_BOOLEAN(0, "dry-run", &dry_run,
+                   "Parse options then exit"),
        OPT_END()
 };
 
@@ -1455,6 +1570,9 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
        if (err)
                return err;
 
+       if (dry_run)
+               return 0;
+
        err = bpf__setup_stdout(rec->evlist);
        if (err) {
                bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
@@ -1508,6 +1626,9 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
                }
        }
 
+       if (record.opts.overwrite)
+               record.opts.tail_synthesize = true;
+
        if (rec->evlist->nr_entries == 0 &&
            perf_evlist__add_default(rec->evlist) < 0) {
                pr_err("Not enough memory for event selector list\n");
index a87cb338bdf14b2d49c19b840fb99b3265e68997..949e5a15c960e2ef190cc97217df29d561c73777 100644 (file)
@@ -8,7 +8,7 @@
 #include "builtin.h"
 
 #include "util/util.h"
-#include "util/cache.h"
+#include "util/config.h"
 
 #include "util/annotate.h"
 #include "util/color.h"
@@ -361,7 +361,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
        struct perf_evsel *pos;
 
        fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n", evlist->stats.total_lost_samples);
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                struct hists *hists = evsel__hists(pos);
                const char *evname = perf_evsel__name(pos);
 
@@ -370,7 +370,8 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
                        continue;
 
                hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
-               hists__fprintf(hists, true, 0, 0, rep->min_percent, stdout);
+               hists__fprintf(hists, true, 0, 0, rep->min_percent, stdout,
+                              symbol_conf.use_callchain);
                fprintf(stdout, "\n\n");
        }
 
@@ -477,7 +478,7 @@ static int report__collapse_hists(struct report *rep)
 
        ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
 
-       evlist__for_each(rep->session->evlist, pos) {
+       evlist__for_each_entry(rep->session->evlist, pos) {
                struct hists *hists = evsel__hists(pos);
 
                if (pos->idx == 0)
@@ -510,7 +511,7 @@ static void report__output_resort(struct report *rep)
 
        ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
 
-       evlist__for_each(rep->session->evlist, pos)
+       evlist__for_each_entry(rep->session->evlist, pos)
                perf_evsel__output_resort(pos, &prog);
 
        ui_progress__finish();
@@ -551,7 +552,7 @@ static int __cmd_report(struct report *rep)
 
        report__warn_kptr_restrict(rep);
 
-       evlist__for_each(session->evlist, pos)
+       evlist__for_each_entry(session->evlist, pos)
                rep->nr_entries += evsel__hists(pos)->nr_entries;
 
        if (use_browser == 0) {
@@ -582,7 +583,7 @@ static int __cmd_report(struct report *rep)
         * might be changed during the collapse phase.
         */
        rep->nr_entries = 0;
-       evlist__for_each(session->evlist, pos)
+       evlist__for_each_entry(session->evlist, pos)
                rep->nr_entries += evsel__hists(pos)->nr_entries;
 
        if (rep->nr_entries == 0) {
@@ -816,6 +817,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
                    "Show raw trace event output (do not use print fmt or plugins)"),
        OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
                    "Show entries in a hierarchy"),
+       OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
+                            "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
+                            stdio__config_color, "always"),
        OPT_END()
        };
        struct perf_data_file file = {
index afa057666c2adf68ec1279dc29a027744ec38fff..0dfe8df2ab9b237b2530bbb4c81967b9eac6a689 100644 (file)
@@ -494,7 +494,7 @@ force_again:
                }
                pr_err("Error: sys_perf_event_open() syscall returned "
                       "with %d (%s)\n%s", fd,
-                      strerror_r(errno, sbuf, sizeof(sbuf)), info);
+                      str_error_r(errno, sbuf, sizeof(sbuf)), info);
                exit(EXIT_FAILURE);
        }
        return fd;
index e3ce2f34d3ad5276cc8f10d78b6590c41b56c179..971ff91b16cb3be52702cca780c3df818d52c51a 100644 (file)
@@ -21,6 +21,7 @@
 #include "util/cpumap.h"
 #include "util/thread_map.h"
 #include "util/stat.h"
+#include "util/thread-stack.h"
 #include <linux/bitmap.h>
 #include <linux/stringify.h>
 #include "asm/bug.h"
@@ -63,6 +64,7 @@ enum perf_output_field {
        PERF_OUTPUT_DATA_SRC        = 1U << 17,
        PERF_OUTPUT_WEIGHT          = 1U << 18,
        PERF_OUTPUT_BPF_OUTPUT      = 1U << 19,
+       PERF_OUTPUT_CALLINDENT      = 1U << 20,
 };
 
 struct output_option {
@@ -89,6 +91,7 @@ struct output_option {
        {.str = "data_src", .field = PERF_OUTPUT_DATA_SRC},
        {.str = "weight",   .field = PERF_OUTPUT_WEIGHT},
        {.str = "bpf-output",   .field = PERF_OUTPUT_BPF_OUTPUT},
+       {.str = "callindent", .field = PERF_OUTPUT_CALLINDENT},
 };
 
 /* default set to maintain compatibility with current format */
@@ -339,7 +342,7 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
  */
 static int perf_session__check_output_opt(struct perf_session *session)
 {
-       int j;
+       unsigned int j;
        struct perf_evsel *evsel;
 
        for (j = 0; j < PERF_TYPE_MAX; ++j) {
@@ -369,7 +372,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
        if (!no_callchain) {
                bool use_callchain = false;
 
-               evlist__for_each(session->evlist, evsel) {
+               evlist__for_each_entry(session->evlist, evsel) {
                        if (evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
                                use_callchain = true;
                                break;
@@ -388,17 +391,20 @@ static int perf_session__check_output_opt(struct perf_session *session)
                struct perf_event_attr *attr;
 
                j = PERF_TYPE_TRACEPOINT;
-               evsel = perf_session__find_first_evtype(session, j);
-               if (evsel == NULL)
-                       goto out;
 
-               attr = &evsel->attr;
+               evlist__for_each_entry(session->evlist, evsel) {
+                       if (evsel->attr.type != j)
+                               continue;
+
+                       attr = &evsel->attr;
 
-               if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) {
-                       output[j].fields |= PERF_OUTPUT_IP;
-                       output[j].fields |= PERF_OUTPUT_SYM;
-                       output[j].fields |= PERF_OUTPUT_DSO;
-                       set_print_ip_opts(attr);
+                       if (attr->sample_type & PERF_SAMPLE_CALLCHAIN) {
+                               output[j].fields |= PERF_OUTPUT_IP;
+                               output[j].fields |= PERF_OUTPUT_SYM;
+                               output[j].fields |= PERF_OUTPUT_DSO;
+                               set_print_ip_opts(attr);
+                               goto out;
+                       }
                }
        }
 
@@ -559,6 +565,62 @@ static void print_sample_addr(struct perf_sample *sample,
        }
 }
 
+static void print_sample_callindent(struct perf_sample *sample,
+                                   struct perf_evsel *evsel,
+                                   struct thread *thread,
+                                   struct addr_location *al)
+{
+       struct perf_event_attr *attr = &evsel->attr;
+       size_t depth = thread_stack__depth(thread);
+       struct addr_location addr_al;
+       const char *name = NULL;
+       static int spacing;
+       int len = 0;
+       u64 ip = 0;
+
+       /*
+        * The 'return' has already been popped off the stack so the depth has
+        * to be adjusted to match the 'call'.
+        */
+       if (thread->ts && sample->flags & PERF_IP_FLAG_RETURN)
+               depth += 1;
+
+       if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
+               if (sample_addr_correlates_sym(attr)) {
+                       thread__resolve(thread, &addr_al, sample);
+                       if (addr_al.sym)
+                               name = addr_al.sym->name;
+                       else
+                               ip = sample->addr;
+               } else {
+                       ip = sample->addr;
+               }
+       } else if (sample->flags & (PERF_IP_FLAG_RETURN | PERF_IP_FLAG_TRACE_END)) {
+               if (al->sym)
+                       name = al->sym->name;
+               else
+                       ip = sample->ip;
+       }
+
+       if (name)
+               len = printf("%*s%s", (int)depth * 4, "", name);
+       else if (ip)
+               len = printf("%*s%16" PRIx64, (int)depth * 4, "", ip);
+
+       if (len < 0)
+               return;
+
+       /*
+        * Try to keep the output length from changing frequently so that the
+        * output lines up more nicely.
+        */
+       if (len > spacing || (len && len < spacing - 52))
+               spacing = round_up(len + 4, 32);
+
+       if (len < spacing)
+               printf("%*s", spacing - len, "");
+}
+
 static void print_sample_bts(struct perf_sample *sample,
                             struct perf_evsel *evsel,
                             struct thread *thread,
@@ -567,6 +629,9 @@ static void print_sample_bts(struct perf_sample *sample,
        struct perf_event_attr *attr = &evsel->attr;
        bool print_srcline_last = false;
 
+       if (PRINT_FIELD(CALLINDENT))
+               print_sample_callindent(sample, evsel, thread, al);
+
        /* print branch_from information */
        if (PRINT_FIELD(IP)) {
                unsigned int print_opts = output[attr->type].print_ip_opts;
@@ -603,13 +668,42 @@ static void print_sample_bts(struct perf_sample *sample,
        printf("\n");
 }
 
+static struct {
+       u32 flags;
+       const char *name;
+} sample_flags[] = {
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL, "call"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN, "return"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CONDITIONAL, "jcc"},
+       {PERF_IP_FLAG_BRANCH, "jmp"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_INTERRUPT, "int"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_INTERRUPT, "iret"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_SYSCALLRET, "syscall"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_RETURN | PERF_IP_FLAG_SYSCALLRET, "sysret"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_ASYNC, "async"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC | PERF_IP_FLAG_INTERRUPT, "hw int"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "tx abrt"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "tr strt"},
+       {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "tr end"},
+       {0, NULL}
+};
+
 static void print_sample_flags(u32 flags)
 {
        const char *chars = PERF_IP_FLAG_CHARS;
        const int n = strlen(PERF_IP_FLAG_CHARS);
+       bool in_tx = flags & PERF_IP_FLAG_IN_TX;
+       const char *name = NULL;
        char str[33];
        int i, pos = 0;
 
+       for (i = 0; sample_flags[i].name ; i++) {
+               if (sample_flags[i].flags == (flags & ~PERF_IP_FLAG_IN_TX)) {
+                       name = sample_flags[i].name;
+                       break;
+               }
+       }
+
        for (i = 0; i < n; i++, flags >>= 1) {
                if (flags & 1)
                        str[pos++] = chars[i];
@@ -619,7 +713,11 @@ static void print_sample_flags(u32 flags)
                        str[pos++] = '?';
        }
        str[pos] = 0;
-       printf("  %-4s ", str);
+
+       if (name)
+               printf("  %-7s%4s ", name, in_tx ? "(x)" : "");
+       else
+               printf("  %-11s ", str);
 }
 
 struct printer_data {
@@ -717,7 +815,7 @@ static int perf_evlist__max_name_len(struct perf_evlist *evlist)
        struct perf_evsel *evsel;
        int max = 0;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                int len = strlen(perf_evsel__name(evsel));
 
                max = MAX(len, max);
@@ -942,7 +1040,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
        if (evsel->attr.type >= PERF_TYPE_MAX)
                return 0;
 
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                if (pos->attr.type == evsel->attr.type && pos != evsel)
                        return 0;
        }
@@ -1668,7 +1766,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
                        snprintf(evname, len + 1, "%s", p);
 
                        match = 0;
-                       evlist__for_each(session->evlist, pos) {
+                       evlist__for_each_entry(session->evlist, pos) {
                                if (!strcmp(perf_evsel__name(pos), evname)) {
                                        match = 1;
                                        break;
@@ -1870,7 +1968,7 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
        struct stat_round_event *round = &event->stat_round;
        struct perf_evsel *counter;
 
-       evlist__for_each(session->evlist, counter) {
+       evlist__for_each_entry(session->evlist, counter) {
                perf_stat_process_counter(&stat_config, counter);
                process_stat(counter, round->time);
        }
@@ -2017,7 +2115,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                     "comma separated output fields prepend with 'type:'. "
                     "Valid types: hw,sw,trace,raw. "
                     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
-                    "addr,symoff,period,iregs,brstack,brstacksym,flags", parse_output_fields),
+                    "addr,symoff,period,iregs,brstack,brstacksym,flags,"
+                    "callindent", parse_output_fields),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
@@ -2256,6 +2355,9 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
        script.session = session;
        script__setup_sample_type(&script);
 
+       if (output[PERF_TYPE_HARDWARE].fields & PERF_OUTPUT_CALLINDENT)
+               itrace_synth_opts.thread_stack = true;
+
        session->itrace_synth_opts = &itrace_synth_opts;
 
        if (cpu_list) {
index ee7ada78d86f81c5f788feed43f42ec49e33bb96..0c16d20d7e32fa2eb7377247c2e5542d2a5d076a 100644 (file)
 #include "util/thread.h"
 #include "util/thread_map.h"
 #include "util/counts.h"
+#include "util/group.h"
 #include "util/session.h"
 #include "util/tool.h"
+#include "util/group.h"
 #include "asm/bug.h"
 
+#include <api/fs/fs.h>
 #include <stdlib.h>
 #include <sys/prctl.h>
 #include <locale.h>
@@ -98,6 +101,15 @@ static const char * transaction_limited_attrs = {
        "}"
 };
 
+static const char * topdown_attrs[] = {
+       "topdown-total-slots",
+       "topdown-slots-retired",
+       "topdown-recovery-bubbles",
+       "topdown-fetch-bubbles",
+       "topdown-slots-issued",
+       NULL,
+};
+
 static struct perf_evlist      *evsel_list;
 
 static struct target target = {
@@ -112,6 +124,7 @@ static volatile pid_t               child_pid                       = -1;
 static bool                    null_run                        =  false;
 static int                     detailed_run                    =  0;
 static bool                    transaction_run;
+static bool                    topdown_run                     = false;
 static bool                    big_num                         =  true;
 static int                     big_num_opt                     =  -1;
 static const char              *csv_sep                        = NULL;
@@ -124,6 +137,7 @@ static unsigned int         initial_delay                   = 0;
 static unsigned int            unit_width                      = 4; /* strlen("unit") */
 static bool                    forever                         = false;
 static bool                    metric_only                     = false;
+static bool                    force_metric_only               = false;
 static struct timespec         ref_time;
 static struct cpu_map          *aggr_map;
 static aggr_get_id_t           aggr_get_id;
@@ -276,8 +290,12 @@ perf_evsel__write_stat_event(struct perf_evsel *counter, u32 cpu, u32 thread,
 static int read_counter(struct perf_evsel *counter)
 {
        int nthreads = thread_map__nr(evsel_list->threads);
-       int ncpus = perf_evsel__nr_cpus(counter);
-       int cpu, thread;
+       int ncpus, cpu, thread;
+
+       if (target__has_cpu(&target))
+               ncpus = perf_evsel__nr_cpus(counter);
+       else
+               ncpus = 1;
 
        if (!counter->supported)
                return -ENOENT;
@@ -317,7 +335,7 @@ static void read_counters(bool close_counters)
 {
        struct perf_evsel *counter;
 
-       evlist__for_each(evsel_list, counter) {
+       evlist__for_each_entry(evsel_list, counter) {
                if (read_counter(counter))
                        pr_debug("failed to read counter %s\n", counter->name);
 
@@ -403,7 +421,7 @@ static int perf_stat_synthesize_config(bool is_pipe)
         * Synthesize other events stuff not carried within
         * attr event - unit, scale, name
         */
-       evlist__for_each(evsel_list, counter) {
+       evlist__for_each_entry(evsel_list, counter) {
                if (!counter->supported)
                        continue;
 
@@ -536,7 +554,7 @@ static int __run_perf_stat(int argc, const char **argv)
        if (group)
                perf_evlist__set_leader(evsel_list);
 
-       evlist__for_each(evsel_list, counter) {
+       evlist__for_each_entry(evsel_list, counter) {
 try_again:
                if (create_perf_stat_counter(counter) < 0) {
                        /*
@@ -582,7 +600,7 @@ try_again:
        if (perf_evlist__apply_filters(evsel_list, &counter)) {
                error("failed to set filter \"%s\" on event %s with %d (%s)\n",
                        counter->filter, perf_evsel__name(counter), errno,
-                       strerror_r(errno, msg, sizeof(msg)));
+                       str_error_r(errno, msg, sizeof(msg)));
                return -1;
        }
 
@@ -623,7 +641,7 @@ try_again:
                wait(&status);
 
                if (workload_exec_errno) {
-                       const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
+                       const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
                        pr_err("Workload failed: %s\n", emsg);
                        return -1;
                }
@@ -1120,7 +1138,7 @@ static void aggr_update_shadow(void)
 
        for (s = 0; s < aggr_map->nr; s++) {
                id = aggr_map->map[s];
-               evlist__for_each(evsel_list, counter) {
+               evlist__for_each_entry(evsel_list, counter) {
                        val = 0;
                        for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
                                s2 = aggr_get_id(evsel_list->cpus, cpu);
@@ -1159,7 +1177,7 @@ static void print_aggr(char *prefix)
 
                id = aggr_map->map[s];
                first = true;
-               evlist__for_each(evsel_list, counter) {
+               evlist__for_each_entry(evsel_list, counter) {
                        val = ena = run = 0;
                        nr = 0;
                        for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
@@ -1278,7 +1296,7 @@ static void print_no_aggr_metric(char *prefix)
 
                if (prefix)
                        fputs(prefix, stat_config.output);
-               evlist__for_each(evsel_list, counter) {
+               evlist__for_each_entry(evsel_list, counter) {
                        if (first) {
                                aggr_printout(counter, cpu, 0);
                                first = false;
@@ -1302,7 +1320,15 @@ static int aggr_header_lens[] = {
        [AGGR_GLOBAL] = 0,
 };
 
-static void print_metric_headers(char *prefix)
+static const char *aggr_header_csv[] = {
+       [AGGR_CORE]     =       "core,cpus,",
+       [AGGR_SOCKET]   =       "socket,cpus",
+       [AGGR_NONE]     =       "cpu,",
+       [AGGR_THREAD]   =       "comm-pid,",
+       [AGGR_GLOBAL]   =       ""
+};
+
+static void print_metric_headers(const char *prefix, bool no_indent)
 {
        struct perf_stat_output_ctx out;
        struct perf_evsel *counter;
@@ -1313,12 +1339,18 @@ static void print_metric_headers(char *prefix)
        if (prefix)
                fprintf(stat_config.output, "%s", prefix);
 
-       if (!csv_output)
+       if (!csv_output && !no_indent)
                fprintf(stat_config.output, "%*s",
                        aggr_header_lens[stat_config.aggr_mode], "");
+       if (csv_output) {
+               if (stat_config.interval)
+                       fputs("time,", stat_config.output);
+               fputs(aggr_header_csv[stat_config.aggr_mode],
+                       stat_config.output);
+       }
 
        /* Print metrics headers only */
-       evlist__for_each(evsel_list, counter) {
+       evlist__for_each_entry(evsel_list, counter) {
                os.evsel = counter;
                out.ctx = &os;
                out.print_metric = print_metric_header;
@@ -1338,28 +1370,40 @@ static void print_interval(char *prefix, struct timespec *ts)
 
        sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
 
-       if (num_print_interval == 0 && !csv_output && !metric_only) {
+       if (num_print_interval == 0 && !csv_output) {
                switch (stat_config.aggr_mode) {
                case AGGR_SOCKET:
-                       fprintf(output, "#           time socket cpus             counts %*s events\n", unit_width, "unit");
+                       fprintf(output, "#           time socket cpus");
+                       if (!metric_only)
+                               fprintf(output, "             counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_CORE:
-                       fprintf(output, "#           time core         cpus             counts %*s events\n", unit_width, "unit");
+                       fprintf(output, "#           time core         cpus");
+                       if (!metric_only)
+                               fprintf(output, "             counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_NONE:
-                       fprintf(output, "#           time CPU                counts %*s events\n", unit_width, "unit");
+                       fprintf(output, "#           time CPU");
+                       if (!metric_only)
+                               fprintf(output, "                counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_THREAD:
-                       fprintf(output, "#           time             comm-pid                  counts %*s events\n", unit_width, "unit");
+                       fprintf(output, "#           time             comm-pid");
+                       if (!metric_only)
+                               fprintf(output, "                  counts %*s events\n", unit_width, "unit");
                        break;
                case AGGR_GLOBAL:
                default:
-                       fprintf(output, "#           time             counts %*s events\n", unit_width, "unit");
+                       fprintf(output, "#           time");
+                       if (!metric_only)
+                               fprintf(output, "             counts %*s events\n", unit_width, "unit");
                case AGGR_UNSET:
                        break;
                }
        }
 
+       if (num_print_interval == 0 && metric_only)
+               print_metric_headers(" ", true);
        if (++num_print_interval == 25)
                num_print_interval = 0;
 }
@@ -1428,8 +1472,8 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
        if (metric_only) {
                static int num_print_iv;
 
-               if (num_print_iv == 0)
-                       print_metric_headers(prefix);
+               if (num_print_iv == 0 && !interval)
+                       print_metric_headers(prefix, false);
                if (num_print_iv++ == 25)
                        num_print_iv = 0;
                if (stat_config.aggr_mode == AGGR_GLOBAL && prefix)
@@ -1442,11 +1486,11 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
                print_aggr(prefix);
                break;
        case AGGR_THREAD:
-               evlist__for_each(evsel_list, counter)
+               evlist__for_each_entry(evsel_list, counter)
                        print_aggr_thread(counter, prefix);
                break;
        case AGGR_GLOBAL:
-               evlist__for_each(evsel_list, counter)
+               evlist__for_each_entry(evsel_list, counter)
                        print_counter_aggr(counter, prefix);
                if (metric_only)
                        fputc('\n', stat_config.output);
@@ -1455,7 +1499,7 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
                if (metric_only)
                        print_no_aggr_metric(prefix);
                else {
-                       evlist__for_each(evsel_list, counter)
+                       evlist__for_each_entry(evsel_list, counter)
                                print_counter(counter, prefix);
                }
                break;
@@ -1520,6 +1564,14 @@ static int stat__set_big_num(const struct option *opt __maybe_unused,
        return 0;
 }
 
+static int enable_metric_only(const struct option *opt __maybe_unused,
+                             const char *s __maybe_unused, int unset)
+{
+       force_metric_only = true;
+       metric_only = !unset;
+       return 0;
+}
+
 static const struct option stat_options[] = {
        OPT_BOOLEAN('T', "transaction", &transaction_run,
                    "hardware transaction statistics"),
@@ -1578,8 +1630,10 @@ static const struct option stat_options[] = {
                     "aggregate counts per thread", AGGR_THREAD),
        OPT_UINTEGER('D', "delay", &initial_delay,
                     "ms to wait before starting measurement after program start"),
-       OPT_BOOLEAN(0, "metric-only", &metric_only,
-                       "Only print computed metrics. No raw values"),
+       OPT_CALLBACK_NOOPT(0, "metric-only", &metric_only, NULL,
+                       "Only print computed metrics. No raw values", enable_metric_only),
+       OPT_BOOLEAN(0, "topdown", &topdown_run,
+                       "measure topdown level 1 statistics"),
        OPT_END()
 };
 
@@ -1772,12 +1826,62 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
        return 0;
 }
 
+static int topdown_filter_events(const char **attr, char **str, bool use_group)
+{
+       int off = 0;
+       int i;
+       int len = 0;
+       char *s;
+
+       for (i = 0; attr[i]; i++) {
+               if (pmu_have_event("cpu", attr[i])) {
+                       len += strlen(attr[i]) + 1;
+                       attr[i - off] = attr[i];
+               } else
+                       off++;
+       }
+       attr[i - off] = NULL;
+
+       *str = malloc(len + 1 + 2);
+       if (!*str)
+               return -1;
+       s = *str;
+       if (i - off == 0) {
+               *s = 0;
+               return 0;
+       }
+       if (use_group)
+               *s++ = '{';
+       for (i = 0; attr[i]; i++) {
+               strcpy(s, attr[i]);
+               s += strlen(s);
+               *s++ = ',';
+       }
+       if (use_group) {
+               s[-1] = '}';
+               *s = 0;
+       } else
+               s[-1] = 0;
+       return 0;
+}
+
+__weak bool arch_topdown_check_group(bool *warn)
+{
+       *warn = false;
+       return false;
+}
+
+__weak void arch_topdown_group_warn(void)
+{
+}
+
 /*
  * Add default attributes, if there were no attributes specified or
  * if -d/--detailed, -d -d or -d -d -d is used:
  */
 static int add_default_attributes(void)
 {
+       int err;
        struct perf_event_attr default_attrs0[] = {
 
   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK             },
@@ -1896,7 +2000,6 @@ static int add_default_attributes(void)
                return 0;
 
        if (transaction_run) {
-               int err;
                if (pmu_have_event("cpu", "cycles-ct") &&
                    pmu_have_event("cpu", "el-start"))
                        err = parse_events(evsel_list, transaction_attrs, NULL);
@@ -1909,6 +2012,46 @@ static int add_default_attributes(void)
                return 0;
        }
 
+       if (topdown_run) {
+               char *str = NULL;
+               bool warn = false;
+
+               if (stat_config.aggr_mode != AGGR_GLOBAL &&
+                   stat_config.aggr_mode != AGGR_CORE) {
+                       pr_err("top down event configuration requires --per-core mode\n");
+                       return -1;
+               }
+               stat_config.aggr_mode = AGGR_CORE;
+               if (nr_cgroups || !target__has_cpu(&target)) {
+                       pr_err("top down event configuration requires system-wide mode (-a)\n");
+                       return -1;
+               }
+
+               if (!force_metric_only)
+                       metric_only = true;
+               if (topdown_filter_events(topdown_attrs, &str,
+                               arch_topdown_check_group(&warn)) < 0) {
+                       pr_err("Out of memory\n");
+                       return -1;
+               }
+               if (topdown_attrs[0] && str) {
+                       if (warn)
+                               arch_topdown_group_warn();
+                       err = parse_events(evsel_list, str, NULL);
+                       if (err) {
+                               fprintf(stderr,
+                                       "Cannot set up top down events %s: %d\n",
+                                       str, err);
+                               free(str);
+                               return -1;
+                       }
+               } else {
+                       fprintf(stderr, "System does not support topdown\n");
+                       return -1;
+               }
+               free(str);
+       }
+
        if (!evsel_list->nr_entries) {
                if (target__has_cpu(&target))
                        default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
@@ -2010,7 +2153,7 @@ static int process_stat_round_event(struct perf_tool *tool __maybe_unused,
        const char **argv = session->header.env.cmdline_argv;
        int argc = session->header.env.nr_cmdline;
 
-       evlist__for_each(evsel_list, counter)
+       evlist__for_each_entry(evsel_list, counter)
                perf_stat_process_counter(&stat_config, counter);
 
        if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
index 2a6cc254ad0c3e09f65f49b5b0169890a5579ac0..bd108683fcb87d30a45bda771f2ad665c59723cf 100644 (file)
@@ -22,7 +22,7 @@
 #include "perf.h"
 
 #include "util/annotate.h"
-#include "util/cache.h"
+#include "util/config.h"
 #include "util/color.h"
 #include "util/evlist.h"
 #include "util/evsel.h"
@@ -295,7 +295,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
        hists__output_recalc_col_len(hists, top->print_entries - printed);
        putchar('\n');
        hists__fprintf(hists, false, top->print_entries - printed, win_width,
-                      top->min_percent, stdout);
+                      top->min_percent, stdout, symbol_conf.use_callchain);
 }
 
 static void prompt_integer(int *target, const char *msg)
@@ -479,7 +479,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
 
                                fprintf(stderr, "\nAvailable events:");
 
-                               evlist__for_each(top->evlist, top->sym_evsel)
+                               evlist__for_each_entry(top->evlist, top->sym_evsel)
                                        fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
 
                                prompt_integer(&counter, "Enter details event counter");
@@ -490,7 +490,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
                                        sleep(1);
                                        break;
                                }
-                               evlist__for_each(top->evlist, top->sym_evsel)
+                               evlist__for_each_entry(top->evlist, top->sym_evsel)
                                        if (top->sym_evsel->idx == counter)
                                                break;
                        } else
@@ -583,7 +583,7 @@ static void *display_thread_tui(void *arg)
         * Zooming in/out UIDs. For now just use whatever the user passed
         * via --uid.
         */
-       evlist__for_each(top->evlist, pos) {
+       evlist__for_each_entry(top->evlist, pos) {
                struct hists *hists = evsel__hists(pos);
                hists->uid_filter_str = top->record_opts.target.uid_str;
        }
@@ -888,7 +888,7 @@ static int perf_top__start_counters(struct perf_top *top)
 
        perf_evlist__config(evlist, opts, &callchain_param);
 
-       evlist__for_each(evlist, counter) {
+       evlist__for_each_entry(evlist, counter) {
 try_again:
                if (perf_evsel__open(counter, top->evlist->cpus,
                                     top->evlist->threads) < 0) {
@@ -907,7 +907,7 @@ try_again:
 
        if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
                ui__error("Failed to mmap with %d (%s)\n",
-                           errno, strerror_r(errno, msg, sizeof(msg)));
+                           errno, str_error_r(errno, msg, sizeof(msg)));
                goto out_err;
        }
 
@@ -1028,7 +1028,7 @@ out_delete:
 
 out_err_cpu_topo: {
        char errbuf[BUFSIZ];
-       const char *err = strerror_r(-ret, errbuf, sizeof(errbuf));
+       const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
 
        ui__error("Could not read the CPU topology map: %s\n", err);
        goto out_delete;
@@ -1295,7 +1295,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
 
        if (perf_evlist__create_maps(top.evlist, target) < 0) {
                ui__error("Couldn't create thread/CPU maps: %s\n",
-                         errno == ENOENT ? "No such process" : strerror_r(errno, errbuf, sizeof(errbuf)));
+                         errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
                goto out_delete_evlist;
        }
 
index 5c50fe70d6b37379bd8b89b05d9df7899b9fc27d..b8c6766301db90ddbe1dfa1fb65d2ab0a024ecc7 100644 (file)
@@ -43,7 +43,6 @@
 #include <linux/err.h>
 #include <linux/filter.h>
 #include <linux/audit.h>
-#include <sys/ptrace.h>
 #include <linux/random.h>
 #include <linux/stringify.h>
 
@@ -334,6 +333,10 @@ static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
 
 #define SCA_FD syscall_arg__scnprintf_fd
 
+#ifndef AT_FDCWD
+#define AT_FDCWD       -100
+#endif
+
 static size_t syscall_arg__scnprintf_fd_at(char *bf, size_t size,
                                           struct syscall_arg *arg)
 {
@@ -1247,7 +1250,7 @@ static int trace__validate_ev_qualifier(struct trace *trace)
 
        i = 0;
 
-       strlist__for_each(pos, trace->ev_qualifier) {
+       strlist__for_each_entry(pos, trace->ev_qualifier) {
                const char *sc = pos->s;
                int id = syscalltbl__id(trace->sctbl, sc);
 
@@ -1601,7 +1604,7 @@ signed_print:
                fprintf(trace->output, ") = %ld", ret);
        } else if (ret < 0 && (sc->fmt->errmsg || sc->fmt->errpid)) {
                char bf[STRERR_BUFSIZE];
-               const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
+               const char *emsg = str_error_r(-ret, bf, sizeof(bf)),
                           *e = audit_errno_to_name(-ret);
 
                fprintf(trace->output, ") = -1 %s %s", e, emsg);
@@ -2402,7 +2405,7 @@ out_error_apply_filters:
        fprintf(trace->output,
                "Failed to set filter \"%s\" on event %s with %d (%s)\n",
                evsel->filter, perf_evsel__name(evsel), errno,
-               strerror_r(errno, errbuf, sizeof(errbuf)));
+               str_error_r(errno, errbuf, sizeof(errbuf)));
        goto out_delete_evlist;
 }
 out_error_mem:
@@ -2483,7 +2486,7 @@ static int trace__replay(struct trace *trace)
                goto out;
        }
 
-       evlist__for_each(session->evlist, evsel) {
+       evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
                    (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
                     evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
@@ -2550,7 +2553,7 @@ static size_t thread__dump_stats(struct thread_trace *ttrace,
        printed += fprintf(fp, "                               (msec)    (msec)    (msec)    (msec)        (%%)\n");
        printed += fprintf(fp, "   --------------- -------- --------- --------- --------- ---------     ------\n");
 
-       resort_rb__for_each(nd, syscall_stats) {
+       resort_rb__for_each_entry(nd, syscall_stats) {
                struct stats *stats = syscall_stats_entry->stats;
                if (stats) {
                        double min = (double)(stats->min) / NSEC_PER_MSEC;
@@ -2627,7 +2630,7 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
                return 0;
        }
 
-       resort_rb__for_each(nd, threads)
+       resort_rb__for_each_entry(nd, threads)
                printed += trace__fprintf_thread(fp, threads_entry->thread, trace);
 
        resort_rb__delete(threads);
@@ -2714,7 +2717,7 @@ static void evlist__set_evsel_handler(struct perf_evlist *evlist, void *handler)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel)
+       evlist__for_each_entry(evlist, evsel)
                evsel->handler = handler;
 }
 
index 5ad0255f8756e20128b6155f174ab85280835e80..24803c58049a20d0d62ab178a140cce2a4256d76 100644 (file)
@@ -73,17 +73,25 @@ endif
 #
 #   make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
 #
+
+libunwind_arch_set_flags = $(eval $(libunwind_arch_set_flags_code))
+define libunwind_arch_set_flags_code
+  FEATURE_CHECK_CFLAGS-libunwind-$(1)  = -I$(LIBUNWIND_DIR)/include
+  FEATURE_CHECK_LDFLAGS-libunwind-$(1) = -L$(LIBUNWIND_DIR)/lib
+endef
+
 ifdef LIBUNWIND_DIR
   LIBUNWIND_CFLAGS  = -I$(LIBUNWIND_DIR)/include
   LIBUNWIND_LDFLAGS = -L$(LIBUNWIND_DIR)/lib
+  LIBUNWIND_ARCHS = x86 x86_64 arm aarch64 debug-frame-arm debug-frame-aarch64
+  $(foreach libunwind_arch,$(LIBUNWIND_ARCHS),$(call libunwind_arch_set_flags,$(libunwind_arch)))
 endif
-LIBUNWIND_LDFLAGS += $(LIBUNWIND_LIBS)
 
 # Set per-feature check compilation flags
 FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS)
-FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS)
+FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
 FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
-FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS)
+FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
 
 ifeq ($(NO_PERF_REGS),0)
   CFLAGS += -DHAVE_PERF_REGS_SUPPORT
@@ -107,7 +115,7 @@ endif
 FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
 FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
 
-FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
+FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
 # include ARCH specific config
 -include $(src-perf)/arch/$(ARCH)/Makefile
 
@@ -198,11 +206,11 @@ endif
 
 CFLAGS += -I$(src-perf)/util/include
 CFLAGS += -I$(src-perf)/arch/$(ARCH)/include
+CFLAGS += -I$(srctree)/tools/include/uapi
 CFLAGS += -I$(srctree)/tools/include/
-CFLAGS += -I$(srctree)/arch/$(ARCH)/include/uapi
-CFLAGS += -I$(srctree)/arch/$(ARCH)/include
-CFLAGS += -I$(srctree)/include/uapi
-CFLAGS += -I$(srctree)/include
+CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/uapi
+CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/include/
+CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/
 
 # $(obj-perf)      for generated common-cmds.h
 # $(obj-perf)/util for generated bison/flex headers
@@ -249,7 +257,7 @@ else
       LIBC_SUPPORT := 1
     endif
     ifeq ($(LIBC_SUPPORT),1)
-      msg := $(warning No libelf found, disables 'probe' tool and BPF support in 'perf record', please install elfutils-libelf-devel/libelf-dev);
+      msg := $(warning No libelf found, disables 'probe' tool and BPF support in 'perf record', please install libelf-dev, libelf-devel or elfutils-libelf-devel);
 
       NO_LIBELF := 1
       NO_DWARF := 1
@@ -301,6 +309,16 @@ ifndef NO_LIBELF
     CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
   endif
 
+  ifeq ($(feature-libelf-gelf_getnote), 1)
+    CFLAGS += -DHAVE_GELF_GETNOTE_SUPPORT
+  else
+    msg := $(warning gelf_getnote() not found on libelf, SDT support disabled);
+  endif
+
+  ifeq ($(feature-libelf-getshdrstrndx), 1)
+    CFLAGS += -DHAVE_ELF_GETSHDRSTRNDX_SUPPORT
+  endif
+
   ifndef NO_DWARF
     ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
       msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled);
@@ -337,6 +355,16 @@ ifndef NO_LIBELF
   endif # NO_LIBBPF
 endif # NO_LIBELF
 
+ifndef NO_SDT
+  ifneq ($(feature-sdt), 1)
+    msg := $(warning No sys/sdt.h found, no SDT events are defined, please install systemtap-sdt-devel or systemtap-sdt-dev);
+    NO_SDT := 1;
+  else
+    CFLAGS += -DHAVE_SDT_EVENT
+    $(call detected,CONFIG_SDT_EVENT)
+  endif
+endif
+
 ifdef PERF_HAVE_JITDUMP
   ifndef NO_DWARF
     $(call detected,CONFIG_JITDUMP)
@@ -351,10 +379,42 @@ ifeq ($(ARCH),powerpc)
 endif
 
 ifndef NO_LIBUNWIND
+  have_libunwind :=
+
+  ifeq ($(feature-libunwind-x86), 1)
+    $(call detected,CONFIG_LIBUNWIND_X86)
+    CFLAGS += -DHAVE_LIBUNWIND_X86_SUPPORT
+    LDFLAGS += -lunwind-x86
+    EXTLIBS_LIBUNWIND += -lunwind-x86
+    have_libunwind = 1
+  endif
+
+  ifeq ($(feature-libunwind-aarch64), 1)
+    $(call detected,CONFIG_LIBUNWIND_AARCH64)
+    CFLAGS += -DHAVE_LIBUNWIND_AARCH64_SUPPORT
+    LDFLAGS += -lunwind-aarch64
+    EXTLIBS_LIBUNWIND += -lunwind-aarch64
+    have_libunwind = 1
+    $(call feature_check,libunwind-debug-frame-aarch64)
+    ifneq ($(feature-libunwind-debug-frame-aarch64), 1)
+      msg := $(warning No debug_frame support found in libunwind-aarch64);
+      CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME_AARCH64
+    endif
+  endif
+
   ifneq ($(feature-libunwind), 1)
     msg := $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR);
+    NO_LOCAL_LIBUNWIND := 1
+  else
+    have_libunwind := 1
+    $(call detected,CONFIG_LOCAL_LIBUNWIND)
+  endif
+
+  ifneq ($(have_libunwind), 1)
     NO_LIBUNWIND := 1
   endif
+else
+  NO_LOCAL_LIBUNWIND := 1
 endif
 
 ifndef NO_LIBBPF
@@ -392,7 +452,7 @@ else
   NO_DWARF_UNWIND := 1
 endif
 
-ifndef NO_LIBUNWIND
+ifndef NO_LOCAL_LIBUNWIND
   ifeq ($(ARCH),$(filter $(ARCH),arm arm64))
     $(call feature_check,libunwind-debug-frame)
     ifneq ($(feature-libunwind-debug-frame), 1)
@@ -403,10 +463,15 @@ ifndef NO_LIBUNWIND
     # non-ARM has no dwarf_find_debug_frame() function:
     CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME
   endif
-  CFLAGS  += -DHAVE_LIBUNWIND_SUPPORT
   EXTLIBS += $(LIBUNWIND_LIBS)
+  LDFLAGS += $(LIBUNWIND_LIBS)
+endif
+
+ifndef NO_LIBUNWIND
+  CFLAGS  += -DHAVE_LIBUNWIND_SUPPORT
   CFLAGS  += $(LIBUNWIND_CFLAGS)
   LDFLAGS += $(LIBUNWIND_LDFLAGS)
+  EXTLIBS += $(EXTLIBS_LIBUNWIND)
 endif
 
 ifndef NO_LIBAUDIT
@@ -437,7 +502,7 @@ endif
 
 ifndef NO_SLANG
   ifneq ($(feature-libslang), 1)
-    msg := $(warning slang not found, disables TUI support. Please install slang-devel or libslang-dev);
+    msg := $(warning slang not found, disables TUI support. Please install slang-devel, libslang-dev or libslang2-dev);
     NO_SLANG := 1
   else
     # Fedora has /usr/include/slang/slang.h, but ubuntu /usr/include/slang.h
index 3573f315f9559cee48cb0cf59530d334c893b3f3..55daefff0d54daae31f6b1134a6a499792a310d7 100644 (file)
@@ -59,7 +59,6 @@ static int get_e_machine(struct jitheader *hdr)
        ssize_t sret;
        char id[16];
        int fd, ret = -1;
-       int m = -1;
        struct {
                uint16_t e_type;
                uint16_t e_machine;
@@ -81,11 +80,7 @@ static int get_e_machine(struct jitheader *hdr)
        if (sret != sizeof(info))
                goto error;
 
-       m = info.e_machine;
-       if (m < 0)
-               m = 0; /* ELF EM_NONE */
-
-       hdr->elf_mach = m;
+       hdr->elf_mach = info.e_machine;
        ret = 0;
 error:
        close(fd);
@@ -491,10 +486,11 @@ jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
                if (sret != 1)
                        goto error;
        }
-       if (padding_count)
+       if (padding_count) {
                sret = fwrite_unlocked(pad_bytes, padding_count, 1, fp);
                if (sret != 1)
                        goto error;
+       }
 
        funlockfile(fp);
        return 0;
index 83a25cef82fdd2747ab0bc7f8b4fdc50c3c65b62..7ed72a475c57b217ff88504fd2903496f2daf25c 100644 (file)
@@ -5,35 +5,18 @@
 #include <sys/types.h>
 #include <sys/syscall.h>
 #include <linux/types.h>
+#include <linux/compiler.h>
 #include <linux/perf_event.h>
 #include <asm/barrier.h>
 
 #if defined(__i386__)
 #define cpu_relax()    asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC   {"model name"}
-#ifndef __NR_perf_event_open
-# define __NR_perf_event_open 336
-#endif
-#ifndef __NR_futex
-# define __NR_futex 240
-#endif
-#ifndef __NR_gettid
-# define __NR_gettid 224
-#endif
 #endif
 
 #if defined(__x86_64__)
 #define cpu_relax()    asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC   {"model name"}
-#ifndef __NR_perf_event_open
-# define __NR_perf_event_open 298
-#endif
-#ifndef __NR_futex
-# define __NR_futex 202
-#endif
-#ifndef __NR_gettid
-# define __NR_gettid 186
-#endif
 #endif
 
 #ifdef __powerpc__
index 15982cee5ef3f3b85b51f470777df5a2ac5ee3d1..64c06961bfe495341c954ffced4aa5e06d5e380e 100644 (file)
@@ -10,7 +10,7 @@
 
 #include "util/env.h"
 #include <subcmd/exec-cmd.h>
-#include "util/cache.h"
+#include "util/config.h"
 #include "util/quote.h"
 #include <subcmd/run-command.h>
 #include "util/parse-events.h"
@@ -139,8 +139,6 @@ struct option options[] = {
        OPT_ARGUMENT("html-path", "html-path"),
        OPT_ARGUMENT("paginate", "paginate"),
        OPT_ARGUMENT("no-pager", "no-pager"),
-       OPT_ARGUMENT("perf-dir", "perf-dir"),
-       OPT_ARGUMENT("work-tree", "work-tree"),
        OPT_ARGUMENT("debugfs-dir", "debugfs-dir"),
        OPT_ARGUMENT("buildid-dir", "buildid-dir"),
        OPT_ARGUMENT("list-cmds", "list-cmds"),
@@ -200,35 +198,6 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                        use_pager = 0;
                        if (envchanged)
                                *envchanged = 1;
-               } else if (!strcmp(cmd, "--perf-dir")) {
-                       if (*argc < 2) {
-                               fprintf(stderr, "No directory given for --perf-dir.\n");
-                               usage(perf_usage_string);
-                       }
-                       setenv(PERF_DIR_ENVIRONMENT, (*argv)[1], 1);
-                       if (envchanged)
-                               *envchanged = 1;
-                       (*argv)++;
-                       (*argc)--;
-                       handled++;
-               } else if (!prefixcmp(cmd, CMD_PERF_DIR)) {
-                       setenv(PERF_DIR_ENVIRONMENT, cmd + strlen(CMD_PERF_DIR), 1);
-                       if (envchanged)
-                               *envchanged = 1;
-               } else if (!strcmp(cmd, "--work-tree")) {
-                       if (*argc < 2) {
-                               fprintf(stderr, "No directory given for --work-tree.\n");
-                               usage(perf_usage_string);
-                       }
-                       setenv(PERF_WORK_TREE_ENVIRONMENT, (*argv)[1], 1);
-                       if (envchanged)
-                               *envchanged = 1;
-                       (*argv)++;
-                       (*argc)--;
-               } else if (!prefixcmp(cmd, CMD_WORK_TREE)) {
-                       setenv(PERF_WORK_TREE_ENVIRONMENT, cmd + strlen(CMD_WORK_TREE), 1);
-                       if (envchanged)
-                               *envchanged = 1;
                } else if (!strcmp(cmd, "--debugfs-dir")) {
                        if (*argc < 2) {
                                fprintf(stderr, "No directory given for --debugfs-dir.\n");
@@ -363,11 +332,6 @@ const char perf_version_string[] = PERF_VERSION;
 
 #define RUN_SETUP      (1<<0)
 #define USE_PAGER      (1<<1)
-/*
- * require working tree to be present -- anything uses this needs
- * RUN_SETUP for reading from the configuration file.
- */
-#define NEED_WORK_TREE (1<<2)
 
 static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 {
@@ -391,6 +355,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
 
        perf_env__set_cmdline(&perf_env, argc, argv);
        status = p->fn(argc, argv, prefix);
+       perf_config__exit();
        exit_browser(status);
        perf_env__exit(&perf_env);
        bpf__clear();
@@ -409,7 +374,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
        /* Check for ENOSPC and EIO errors.. */
        if (fflush(stdout)) {
                fprintf(stderr, "write failure on standard output: %s",
-                       strerror_r(errno, sbuf, sizeof(sbuf)));
+                       str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out;
        }
        if (ferror(stdout)) {
@@ -418,7 +383,7 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
        }
        if (fclose(stdout)) {
                fprintf(stderr, "close failed on standard output: %s",
-                       strerror_r(errno, sbuf, sizeof(sbuf)));
+                       str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out;
        }
        status = 0;
@@ -532,6 +497,16 @@ void pthread__unblock_sigwinch(void)
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
 }
 
+#ifdef _SC_LEVEL1_DCACHE_LINESIZE
+#define cache_line_size(cacheline_sizep) *cacheline_sizep = sysconf(_SC_LEVEL1_DCACHE_LINESIZE)
+#else
+static void cache_line_size(int *cacheline_sizep)
+{
+       if (sysfs__read_int("devices/system/cpu/cpu0/cache/index0/coherency_line_size", cacheline_sizep))
+               pr_debug("cannot determine cache line size");
+}
+#endif
+
 int main(int argc, const char **argv)
 {
        const char *cmd;
@@ -544,7 +519,7 @@ int main(int argc, const char **argv)
 
        /* The page_size is placed in util object. */
        page_size = sysconf(_SC_PAGE_SIZE);
-       cacheline_size = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
+       cache_line_size(&cacheline_size);
 
        if (sysctl__read_int("kernel/perf_event_max_stack", &value) == 0)
                sysctl_perf_event_max_stack = value;
@@ -558,6 +533,7 @@ int main(int argc, const char **argv)
 
        srandom(time(NULL));
 
+       perf_config__init();
        perf_config(perf_default_config, NULL);
        set_buildid_dir(NULL);
 
@@ -649,7 +625,7 @@ int main(int argc, const char **argv)
        }
 
        fprintf(stderr, "Failed to run command '%s': %s\n",
-               cmd, strerror_r(errno, sbuf, sizeof(sbuf)));
+               cmd, str_error_r(errno, sbuf, sizeof(sbuf)));
 out:
        return 1;
 }
index cd8f1b150f9ec67ec2e3aef5dd45f6eee7918e89..a7e0f14972444771aad97208978aac529c03566e 100644 (file)
@@ -59,6 +59,8 @@ struct record_opts {
        bool         record_switch_events;
        bool         all_kernel;
        bool         all_user;
+       bool         tail_synthesize;
+       bool         overwrite;
        unsigned int freq;
        unsigned int mmap_pages;
        unsigned int auxtrace_mmap_pages;
diff --git a/tools/perf/python/tracepoint.py b/tools/perf/python/tracepoint.py
new file mode 100755 (executable)
index 0000000..eb4dbed
--- /dev/null
@@ -0,0 +1,47 @@
+#! /usr/bin/python
+# -*- python -*-
+# -*- coding: utf-8 -*-
+
+import perf
+
+class tracepoint(perf.evsel):
+    def __init__(self, sys, name):
+        config = perf.tracepoint(sys, name)
+        perf.evsel.__init__(self,
+                            type   = perf.TYPE_TRACEPOINT,
+                            config = config,
+                            freq = 0, sample_period = 1, wakeup_events = 1,
+                            sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_RAW | perf.SAMPLE_TIME)
+
+def main():
+    tp      = tracepoint("sched", "sched_switch")
+    cpus    = perf.cpu_map()
+    threads = perf.thread_map(-1)
+
+    evlist = perf.evlist(cpus, threads)
+    evlist.add(tp)
+    evlist.open()
+    evlist.mmap()
+
+    while True:
+        evlist.poll(timeout = -1)
+        for cpu in cpus:
+            event = evlist.read_on_cpu(cpu)
+            if not event:
+                continue
+
+            if not isinstance(event, perf.sample_event):
+                continue
+
+            print "time %u prev_comm=%s prev_pid=%d prev_prio=%d prev_state=0x%x ==> next_comm=%s next_pid=%d next_prio=%d" % (
+                   event.sample_time,
+                   event.prev_comm,
+                   event.prev_pid,
+                   event.prev_prio,
+                   event.prev_state,
+                   event.next_comm,
+                   event.next_pid,
+                   event.next_prio)
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/perf/scripts/python/bin/stackcollapse-record b/tools/perf/scripts/python/bin/stackcollapse-record
new file mode 100755 (executable)
index 0000000..9d8f9f0
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+#
+# stackcollapse.py can cover all types of perf samples including
+# the tracepoints, so no special record requirements, just record what
+# you want to analyze.
+#
+perf record "$@"
diff --git a/tools/perf/scripts/python/bin/stackcollapse-report b/tools/perf/scripts/python/bin/stackcollapse-report
new file mode 100755 (executable)
index 0000000..356b965
--- /dev/null
@@ -0,0 +1,3 @@
+#!/bin/sh
+# description: produce callgraphs in short form for scripting use
+perf script -s "$PERF_EXEC_PATH"/scripts/python/stackcollapse.py -- "$@"
diff --git a/tools/perf/scripts/python/stackcollapse.py b/tools/perf/scripts/python/stackcollapse.py
new file mode 100755 (executable)
index 0000000..5a605f7
--- /dev/null
@@ -0,0 +1,125 @@
+# stackcollapse.py - format perf samples with one line per distinct call stack
+#
+# This script's output has two space-separated fields.  The first is a semicolon
+# separated stack including the program name (from the "comm" field) and the
+# function names from the call stack.  The second is a count:
+#
+#  swapper;start_kernel;rest_init;cpu_idle;default_idle;native_safe_halt 2
+#
+# The file is sorted according to the first field.
+#
+# Input may be created and processed using:
+#
+#  perf record -a -g -F 99 sleep 60
+#  perf script report stackcollapse > out.stacks-folded
+#
+# (perf script record stackcollapse works too).
+#
+# Written by Paolo Bonzini <pbonzini@redhat.com>
+# Based on Brendan Gregg's stackcollapse-perf.pl script.
+
+import os
+import sys
+from collections import defaultdict
+from optparse import OptionParser, make_option
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+                '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+from EventClass import *
+
+# command line parsing
+
+option_list = [
+    # formatting options for the bottom entry of the stack
+    make_option("--include-tid", dest="include_tid",
+                 action="store_true", default=False,
+                 help="include thread id in stack"),
+    make_option("--include-pid", dest="include_pid",
+                 action="store_true", default=False,
+                 help="include process id in stack"),
+    make_option("--no-comm", dest="include_comm",
+                 action="store_false", default=True,
+                 help="do not separate stacks according to comm"),
+    make_option("--tidy-java", dest="tidy_java",
+                 action="store_true", default=False,
+                 help="beautify Java signatures"),
+    make_option("--kernel", dest="annotate_kernel",
+                 action="store_true", default=False,
+                 help="annotate kernel functions with _[k]")
+]
+
+parser = OptionParser(option_list=option_list)
+(opts, args) = parser.parse_args()
+
+if len(args) != 0:
+    parser.error("unexpected command line argument")
+if opts.include_tid and not opts.include_comm:
+    parser.error("requesting tid but not comm is invalid")
+if opts.include_pid and not opts.include_comm:
+    parser.error("requesting pid but not comm is invalid")
+
+# event handlers
+
+lines = defaultdict(lambda: 0)
+
+def process_event(param_dict):
+    def tidy_function_name(sym, dso):
+        if sym is None:
+            sym = '[unknown]'
+
+        sym = sym.replace(';', ':')
+        if opts.tidy_java:
+            # the original stackcollapse-perf.pl script gives the
+            # example of converting this:
+            #    Lorg/mozilla/javascript/MemberBox;.<init>(Ljava/lang/reflect/Method;)V
+            # to this:
+            #    org/mozilla/javascript/MemberBox:.init
+            sym = sym.replace('<', '')
+            sym = sym.replace('>', '')
+            if sym[0] == 'L' and sym.find('/'):
+                sym = sym[1:]
+            try:
+                sym = sym[:sym.index('(')]
+            except ValueError:
+                pass
+
+        if opts.annotate_kernel and dso == '[kernel.kallsyms]':
+            return sym + '_[k]'
+        else:
+            return sym
+
+    stack = list()
+    if 'callchain' in param_dict:
+        for entry in param_dict['callchain']:
+            entry.setdefault('sym', dict())
+            entry['sym'].setdefault('name', None)
+            entry.setdefault('dso', None)
+            stack.append(tidy_function_name(entry['sym']['name'],
+                                            entry['dso']))
+    else:
+        param_dict.setdefault('symbol', None)
+        param_dict.setdefault('dso', None)
+        stack.append(tidy_function_name(param_dict['symbol'],
+                                        param_dict['dso']))
+
+    if opts.include_comm:
+        comm = param_dict["comm"].replace(' ', '_')
+        sep = "-"
+        if opts.include_pid:
+            comm = comm + sep + str(param_dict['sample']['pid'])
+            sep = "/"
+        if opts.include_tid:
+            comm = comm + sep + str(param_dict['sample']['tid'])
+        stack.append(comm)
+
+    stack_string = ';'.join(reversed(stack))
+    lines[stack_string] = lines[stack_string] + 1
+
+def trace_end():
+    list = lines.keys()
+    list.sort()
+    for stack in list:
+        print "%s %d" % (stack, lines[stack])
index 66a28982547b3e7ae36359a97fa687acaf101223..cb20ae1c0d350a8547f5d217b25f4895e451be9d 100644 (file)
@@ -39,6 +39,8 @@ perf-y += stat.o
 perf-y += event_update.o
 perf-y += event-times.o
 perf-y += backward-ring-buffer.o
+perf-y += sdt.o
+perf-y += is_printable_array.o
 
 $(OUTPUT)tests/llvm-src-base.c: tests/bpf-script-example.c tests/Build
        $(call rule_mkdir)
index d9ba991a9a30f892201e7d1cd2dc10be612198a6..615780cbfe1d86dce93bc4dfb60e6850a42797b0 100644 (file)
@@ -31,8 +31,8 @@ static int count_samples(struct perf_evlist *evlist, int *sample_count,
        for (i = 0; i < evlist->nr_mmaps; i++) {
                union perf_event *event;
 
-               perf_evlist__mmap_read_catchup(evlist, i);
-               while ((event = perf_evlist__mmap_read_backward(evlist, i)) != NULL) {
+               perf_mmap__read_catchup(&evlist->backward_mmap[i]);
+               while ((event = perf_mmap__read_backward(&evlist->backward_mmap[i])) != NULL) {
                        const u32 type = event->header.type;
 
                        switch (type) {
@@ -60,7 +60,7 @@ static int do_test(struct perf_evlist *evlist, int mmap_pages,
        err = perf_evlist__mmap(evlist, mmap_pages, true);
        if (err < 0) {
                pr_debug("perf_evlist__mmap: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                return TEST_FAIL;
        }
 
@@ -108,7 +108,11 @@ int test__backward_ring_buffer(int subtest __maybe_unused)
        }
 
        bzero(&parse_error, sizeof(parse_error));
-       err = parse_events(evlist, "syscalls:sys_enter_prctl", &parse_error);
+       /*
+        * Set backward bit, ring buffer should be writing from end. Record
+        * it in aux evlist
+        */
+       err = parse_events(evlist, "syscalls:sys_enter_prctl/overwrite/", &parse_error);
        if (err) {
                pr_debug("Failed to parse tracepoint event, try use root\n");
                ret = TEST_SKIP;
@@ -117,14 +121,10 @@ int test__backward_ring_buffer(int subtest __maybe_unused)
 
        perf_evlist__config(evlist, &opts, NULL);
 
-       /* Set backward bit, ring buffer should be writing from end */
-       evlist__for_each(evlist, evsel)
-               evsel->attr.write_backward = 1;
-
        err = perf_evlist__open(evlist);
        if (err < 0) {
                pr_debug("perf_evlist__open: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
index 0ec9c2c031649377a2523deda904ea1a7c9284c0..e53bc91fa2604f8653bdfed80ff1d43e85849ffb 100644 (file)
@@ -31,8 +31,8 @@ struct bpf_map_def SEC("maps") flip_table = {
        .max_entries = 1,
 };
 
-SEC("func=sys_epoll_pwait")
-int bpf_func__sys_epoll_pwait(void *ctx)
+SEC("func=sys_epoll_wait")
+int bpf_func__sys_epoll_wait(void *ctx)
 {
        int ind =0;
        int *flag = bpf_map_lookup_elem(&flip_table, &ind);
index f31eed31c1a9cff8e8307a1f06c3013329871ec6..fc54064b91860edf0b1d173f1c05eee2bac4762c 100644 (file)
 
 #ifdef HAVE_LIBBPF_SUPPORT
 
-static int epoll_pwait_loop(void)
+static int epoll_wait_loop(void)
 {
        int i;
 
        /* Should fail NR_ITERS times */
        for (i = 0; i < NR_ITERS; i++)
-               epoll_pwait(-(i + 1), NULL, 0, 0, NULL);
+               epoll_wait(-(i + 1), NULL, 0, 0);
        return 0;
 }
 
@@ -61,7 +61,7 @@ static struct {
                "[basic_bpf_test]",
                "fix 'perf test LLVM' first",
                "load bpf object failed",
-               &epoll_pwait_loop,
+               &epoll_wait_loop,
                (NR_ITERS + 1) / 2,
        },
 #ifdef HAVE_BPF_PROLOGUE
@@ -143,14 +143,14 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
        err = perf_evlist__open(evlist);
        if (err < 0) {
                pr_debug("perf_evlist__open: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
        err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
        if (err < 0) {
                pr_debug("perf_evlist__mmap: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
index 0e95c20ecf6e730d95d83bc720dc03a4c4fc47a4..10eb30686c9c3d06b80fcc13d176390ee0b934ee 100644 (file)
@@ -14,6 +14,8 @@
 #include <subcmd/parse-options.h>
 #include "symbol.h"
 
+static bool dont_fork;
+
 struct test __weak arch_tests[] = {
        {
                .func = NULL,
@@ -211,6 +213,18 @@ static struct test generic_tests[] = {
                .desc = "Test backward reading from ring buffer",
                .func = test__backward_ring_buffer,
        },
+       {
+               .desc = "Test cpu map print",
+               .func = test__cpu_map_print,
+       },
+       {
+               .desc = "Test SDT event probing",
+               .func = test__sdt_event,
+       },
+       {
+               .desc = "Test is_printable_array function",
+               .func = test__is_printable_array,
+       },
        {
                .func = NULL,
        },
@@ -247,44 +261,51 @@ static bool perf_test__matches(struct test *test, int curr, int argc, const char
 
 static int run_test(struct test *test, int subtest)
 {
-       int status, err = -1, child = fork();
+       int status, err = -1, child = dont_fork ? 0 : fork();
        char sbuf[STRERR_BUFSIZE];
 
        if (child < 0) {
                pr_err("failed to fork test: %s\n",
-                       strerror_r(errno, sbuf, sizeof(sbuf)));
+                       str_error_r(errno, sbuf, sizeof(sbuf)));
                return -1;
        }
 
        if (!child) {
-               pr_debug("test child forked, pid %d\n", getpid());
-               if (!verbose) {
-                       int nullfd = open("/dev/null", O_WRONLY);
-                       if (nullfd >= 0) {
-                               close(STDERR_FILENO);
-                               close(STDOUT_FILENO);
-
-                               dup2(nullfd, STDOUT_FILENO);
-                               dup2(STDOUT_FILENO, STDERR_FILENO);
-                               close(nullfd);
+               if (!dont_fork) {
+                       pr_debug("test child forked, pid %d\n", getpid());
+
+                       if (!verbose) {
+                               int nullfd = open("/dev/null", O_WRONLY);
+
+                               if (nullfd >= 0) {
+                                       close(STDERR_FILENO);
+                                       close(STDOUT_FILENO);
+
+                                       dup2(nullfd, STDOUT_FILENO);
+                                       dup2(STDOUT_FILENO, STDERR_FILENO);
+                                       close(nullfd);
+                               }
+                       } else {
+                               signal(SIGSEGV, sighandler_dump_stack);
+                               signal(SIGFPE, sighandler_dump_stack);
                        }
-               } else {
-                       signal(SIGSEGV, sighandler_dump_stack);
-                       signal(SIGFPE, sighandler_dump_stack);
                }
 
                err = test->func(subtest);
-               exit(err);
+               if (!dont_fork)
+                       exit(err);
        }
 
-       wait(&status);
+       if (!dont_fork) {
+               wait(&status);
 
-       if (WIFEXITED(status)) {
-               err = (signed char)WEXITSTATUS(status);
-               pr_debug("test child finished with %d\n", err);
-       } else if (WIFSIGNALED(status)) {
-               err = -1;
-               pr_debug("test child interrupted\n");
+               if (WIFEXITED(status)) {
+                       err = (signed char)WEXITSTATUS(status);
+                       pr_debug("test child finished with %d\n", err);
+               } else if (WIFSIGNALED(status)) {
+                       err = -1;
+                       pr_debug("test child interrupted\n");
+               }
        }
 
        return err;
@@ -425,6 +446,8 @@ int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_STRING('s', "skip", &skip, "tests", "tests to skip"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
+       OPT_BOOLEAN('F', "dont-fork", &dont_fork,
+                   "Do not fork for testcase"),
        OPT_END()
        };
        const char * const test_subcommands[] = { "list", NULL };
index 4cb6418a8ffc33d6c4b71c66c112420191c68956..f168a85992d0ca3fec6c5abe4f2fd0efc3917224 100644 (file)
@@ -1,5 +1,12 @@
 #include "tests.h"
+#include <stdio.h>
 #include "cpumap.h"
+#include "event.h"
+#include <string.h>
+#include <linux/bitops.h>
+#include "debug.h"
+
+struct machine;
 
 static int process_event_mask(struct perf_tool *tool __maybe_unused,
                         union perf_event *event,
@@ -86,3 +93,27 @@ int test__cpu_map_synthesize(int subtest __maybe_unused)
        cpu_map__put(cpus);
        return 0;
 }
+
+/*
+ * Round-trip helper: parse @str into a cpu_map, print it back with
+ * cpu_map__snprint() and compare against the original string.
+ * Returns non-zero on success, 0 on failure (parse error or mismatch).
+ */
+static int cpu_map_print(const char *str)
+{
+       struct cpu_map *map = cpu_map__new(str);
+       char buf[100];
+       int ret;
+
+       /*
+        * Return 0 (not -1) on failure: callers wrap this in
+        * TEST_ASSERT_VAL(), which treats any non-zero value as success,
+        * so -1 would make a failed cpu_map__new() pass the test.
+        */
+       if (!map)
+               return 0;
+
+       cpu_map__snprint(map, buf, sizeof(buf));
+       ret = !strcmp(buf, str);
+       /* Drop the reference taken by cpu_map__new() to avoid a leak. */
+       cpu_map__put(map);
+       return ret;
+}
+
+/*
+ * Exercise cpu_map string round-trips (cpu_map__new() followed by
+ * cpu_map__snprint()) for single CPUs, comma lists, ranges and mixed
+ * forms.  Returns 0 on success; TEST_ASSERT_VAL() bails out otherwise.
+ */
+int test__cpu_map_print(int subtest __maybe_unused)
+{
+       TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1"));
+       TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,5"));
+       TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3,5,7,9,11,13,15,17,19,21-40"));
+       TEST_ASSERT_VAL("failed to convert map", cpu_map_print("2-5"));
+       TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3-6,8-10,24,35-37"));
+       /* NOTE(review): duplicate of the previous case — harmless, but
+        * probably intended to be a different pattern. */
+       TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1,3-6,8-10,24,35-37"));
+       TEST_ASSERT_VAL("failed to convert map", cpu_map_print("1-10,12-20,22-30,32-40"));
+       return 0;
+}
index 8cf0d9e189a8bea36c02c965d74e216d29409089..13725e09ba22447ed97b498e6751a34c2a027cbf 100644 (file)
@@ -251,6 +251,9 @@ int test__dso_data_cache(int subtest __maybe_unused)
        long nr_end, nr = open_files_cnt();
        int dso_cnt, limit, i, fd;
 
+       /* Reset the internal dso open counter limit. */
+       reset_fd_limit();
+
        memset(&machine, 0, sizeof(machine));
 
        /* set as system limit */
@@ -312,6 +315,9 @@ int test__dso_data_reopen(int subtest __maybe_unused)
 #define dso_1 (dsos[1])
 #define dso_2 (dsos[2])
 
+       /* Reset the internal dso open counter limit. */
+       reset_fd_limit();
+
        memset(&machine, 0, sizeof(machine));
 
        /*
index 95fb744f6628b46301163f24fe5679d46c192453..19ef77bd6eb4152b539dc46638d4ef3a8db972e3 100644 (file)
@@ -37,7 +37,7 @@ static int attach__enable_on_exec(struct perf_evlist *evlist)
        err = perf_evlist__open(evlist);
        if (err < 0) {
                pr_debug("perf_evlist__open: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                return err;
        }
 
@@ -200,8 +200,7 @@ static int test_times(int (attach)(struct perf_evlist *),
                 count.ena, count.run);
 
 out_err:
-       if (evlist)
-               perf_evlist__delete(evlist);
+       perf_evlist__delete(evlist);
        return !err ? TEST_OK : TEST_FAIL;
 }
 
index 2de4a4f2c3ed3df38e85e69f19dc68fc14f66f3d..60926a1f6fd7fcf128924759545d7f6620128406 100644 (file)
@@ -80,7 +80,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names)
        }
 
        err = 0;
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) {
                        --err;
                        pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]);
index c809463edbe51b96891f30bba13226c8f3f9ad99..a2b5ff9bf83d615b67d97a00ec38e04e3f8f6291 100644 (file)
@@ -1,4 +1,5 @@
 #include <api/fd/array.h>
+#include <poll.h>
 #include "util/debug.h"
 #include "tests/tests.h"
 
@@ -36,7 +37,7 @@ int test__fdarray__filter(int subtest __maybe_unused)
        }
 
        fdarray__init_revents(fda, POLLIN);
-       nr_fds = fdarray__filter(fda, POLLHUP, NULL);
+       nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
        if (nr_fds != fda->nr_alloc) {
                pr_debug("\nfdarray__filter()=%d != %d shouldn't have filtered anything",
                         nr_fds, fda->nr_alloc);
@@ -44,7 +45,7 @@ int test__fdarray__filter(int subtest __maybe_unused)
        }
 
        fdarray__init_revents(fda, POLLHUP);
-       nr_fds = fdarray__filter(fda, POLLHUP, NULL);
+       nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
        if (nr_fds != 0) {
                pr_debug("\nfdarray__filter()=%d != %d, should have filtered all fds",
                         nr_fds, fda->nr_alloc);
@@ -57,7 +58,7 @@ int test__fdarray__filter(int subtest __maybe_unused)
 
        pr_debug("\nfiltering all but fda->entries[2]:");
        fdarray__fprintf_prefix(fda, "before", stderr);
-       nr_fds = fdarray__filter(fda, POLLHUP, NULL);
+       nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
        fdarray__fprintf_prefix(fda, " after", stderr);
        if (nr_fds != 1) {
                pr_debug("\nfdarray__filter()=%d != 1, should have left just one event", nr_fds);
@@ -78,7 +79,7 @@ int test__fdarray__filter(int subtest __maybe_unused)
 
        pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");
        fdarray__fprintf_prefix(fda, "before", stderr);
-       nr_fds = fdarray__filter(fda, POLLHUP, NULL);
+       nr_fds = fdarray__filter(fda, POLLHUP, NULL, NULL);
        fdarray__fprintf_prefix(fda, " after", stderr);
        if (nr_fds != 2) {
                pr_debug("\nfdarray__filter()=%d != 2, should have left just two events",
index a9e3db3afac423a89f589ee995433278172b019e..9fd54b79a7886266889f17f5865d69b46171cbf3 100644 (file)
@@ -216,6 +216,8 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
 
                /* check callchain entries */
                root = &he->callchain->node.rb_root;
+
+               TEST_ASSERT_VAL("callchains expected", !RB_EMPTY_ROOT(root));
                cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);
 
                c = 0;
@@ -666,6 +668,8 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
        perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
        setup_sorting(NULL);
+
+       callchain_param = callchain_param_default;
        callchain_register_param(&callchain_param);
 
        err = add_hist_entries(hists, machine);
index e846f8c420136426fae224ce658946923b126f83..62efb14f3a5a7eabb0dfbc7bae31e90276ef9c54 100644 (file)
@@ -56,7 +56,7 @@ static int add_hist_entries(struct perf_evlist *evlist,
         * (perf [perf] main) will be collapsed to an existing entry
         * so total 9 entries will be in the tree.
         */
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
                        struct hist_entry_iter iter = {
                                .evsel = evsel,
@@ -136,7 +136,7 @@ int test__hists_filter(int subtest __maybe_unused)
        if (err < 0)
                goto out;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                struct hists *hists = evsel__hists(evsel);
 
                hists__collapse_resort(hists, NULL);
index acf5a1301c0771eafaee8775e10ad470fd0d6b07..eddc7407ff8a9341a2e19e859bf1cab8781e8eda 100644 (file)
@@ -72,7 +72,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
         * However the second evsel also has a collapsed entry for
         * "bash [libc] malloc" so total 9 entries will be in the tree.
         */
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                struct hists *hists = evsel__hists(evsel);
 
                for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
@@ -84,7 +84,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
                        if (machine__resolve(machine, &al, &sample) < 0)
                                goto out;
 
-                       he = __hists__add_entry(hists, &al, NULL,
+                       he = hists__add_entry(hists, &al, NULL,
                                                NULL, NULL, &sample, true);
                        if (he == NULL) {
                                addr_location__put(&al);
@@ -103,7 +103,7 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
                        if (machine__resolve(machine, &al, &sample) < 0)
                                goto out;
 
-                       he = __hists__add_entry(hists, &al, NULL,
+                       he = hists__add_entry(hists, &al, NULL,
                                                NULL, NULL, &sample, true);
                        if (he == NULL) {
                                addr_location__put(&al);
@@ -301,7 +301,7 @@ int test__hists_link(int subtest __maybe_unused)
        if (err < 0)
                goto out;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                hists = evsel__hists(evsel);
                hists__collapse_resort(hists, NULL);
 
diff --git a/tools/perf/tests/is_printable_array.c b/tools/perf/tests/is_printable_array.c
new file mode 100644 (file)
index 0000000..42e1339
--- /dev/null
@@ -0,0 +1,36 @@
+#include <linux/compiler.h>
+#include "tests.h"
+#include "debug.h"
+#include "util.h"
+
+/*
+ * Table-driven test for is_printable_array(): a buffer is "printable"
+ * only if it is non-NULL, non-empty, NUL-terminated at len - 1, and all
+ * preceding bytes are printable characters.
+ */
+int test__is_printable_array(int subtest __maybe_unused)
+{
+       /* Non-printable byte (4) in the middle of the string. */
+       char buf1[] = { 'k', 'r', 4, 'v', 'a', 0 };
+       /* Non-printable byte (4) just before the terminator. */
+       char buf2[] = { 'k', 'r', 'a', 'v', 4, 0 };
+       struct {
+               char            *buf;   /* input buffer (may be NULL) */
+               unsigned int     len;   /* length passed in, incl. NUL */
+               int              ret;   /* expected result */
+       } t[] = {
+               { (char *) "krava",     sizeof("krava"),        1 },
+               /* len excludes the NUL terminator -> not printable */
+               { (char *) "krava",     sizeof("krava") - 1,    0 },
+               { (char *) "",          sizeof(""),             1 },
+               { (char *) "",          0,                      0 },
+               { NULL,                 0,                      0 },
+               { buf1,                 sizeof(buf1),           0 },
+               { buf2,                 sizeof(buf2),           0 },
+       };
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(t); i++) {
+               int ret;
+
+               ret = is_printable_array((char *) t[i].buf, t[i].len);
+               if (ret != t[i].ret) {
+                       pr_err("failed: test %u\n", i);
+                       return TEST_FAIL;
+               }
+       }
+
+       return TEST_OK;
+}
index cff564fb4b66761f7f7fdcd5ee10727ad3b08726..b798a4bfd23892677b225e41009a784cd4e06d8d 100644 (file)
@@ -5,6 +5,7 @@
 #include "llvm.h"
 #include "tests.h"
 #include "debug.h"
+#include "util.h"
 
 #ifdef HAVE_LIBBPF_SUPPORT
 static int test__bpf_parsing(void *obj_buf, size_t obj_buf_sz)
index cac15d93aea656f96ad449cba1f9529e42afcbec..143f4d549769abf769c32361f0abecdef7efd880 100644 (file)
@@ -81,6 +81,8 @@ make_no_libbionic   := NO_LIBBIONIC=1
 make_no_auxtrace    := NO_AUXTRACE=1
 make_no_libbpf     := NO_LIBBPF=1
 make_no_libcrypto   := NO_LIBCRYPTO=1
+make_with_babeltrace:= LIBBABELTRACE=1
+make_no_sdt        := NO_SDT=1
 make_tags           := tags
 make_cscope         := cscope
 make_help           := help
@@ -104,7 +106,7 @@ make_minimal        := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
 make_minimal        += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
 make_minimal        += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
 make_minimal        += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
-make_minimal        += NO_LIBCRYPTO=1
+make_minimal        += NO_LIBCRYPTO=1 NO_SDT=1
 
 # $(run) contains all available tests
 run := make_pure
@@ -136,6 +138,7 @@ run += make_no_libaudit
 run += make_no_libbionic
 run += make_no_auxtrace
 run += make_no_libbpf
+run += make_with_babeltrace
 run += make_help
 run += make_doc
 run += make_perf_o
index 359e98fcd94cd1be00dcc085fa18d058c8fb1986..634bce9caebd343278e24f6c32ab3569f9bc003d 100644 (file)
@@ -1,3 +1,6 @@
+/* For the CLR_() macros */
+#include <pthread.h>
+
 #include "evlist.h"
 #include "evsel.h"
 #include "thread_map.h"
@@ -49,7 +52,7 @@ int test__basic_mmap(int subtest __maybe_unused)
        sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
        if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                pr_debug("sched_setaffinity() failed on CPU %d: %s ",
-                        cpus->map[0], strerror_r(errno, sbuf, sizeof(sbuf)));
+                        cpus->map[0], str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_free_cpus;
        }
 
@@ -79,7 +82,7 @@ int test__basic_mmap(int subtest __maybe_unused)
                if (perf_evsel__open(evsels[i], cpus, threads) < 0) {
                        pr_debug("failed to open counter: %s, "
                                 "tweak /proc/sys/kernel/perf_event_paranoid?\n",
-                                strerror_r(errno, sbuf, sizeof(sbuf)));
+                                str_error_r(errno, sbuf, sizeof(sbuf)));
                        goto out_delete_evlist;
                }
 
@@ -89,7 +92,7 @@ int test__basic_mmap(int subtest __maybe_unused)
 
        if (perf_evlist__mmap(evlist, 128, true) < 0) {
                pr_debug("failed to mmap events: %d (%s)\n", errno,
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
@@ -126,7 +129,7 @@ int test__basic_mmap(int subtest __maybe_unused)
        }
 
        err = 0;
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (nr_events[evsel->idx] != expected_nr_events[evsel->idx]) {
                        pr_debug("expected %d %s events, got %d\n",
                                 expected_nr_events[evsel->idx],
index ad1cb63139a79af0b176fe312294798d487aa2be..c8d9592eb142dd08ef4cf216dd67e6e6614996b7 100644 (file)
@@ -1,3 +1,6 @@
+/* For the CPU_* macros */
+#include <pthread.h>
+
 #include <api/fs/fs.h>
 #include <linux/err.h>
 #include "evsel.h"
@@ -41,7 +44,7 @@ int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
        if (perf_evsel__open(evsel, cpus, threads) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_evsel_delete;
        }
 
@@ -62,7 +65,7 @@ int test__openat_syscall_event_on_all_cpus(int subtest __maybe_unused)
                if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                        pr_debug("sched_setaffinity() failed on CPU %d: %s ",
                                 cpus->map[cpu],
-                                strerror_r(errno, sbuf, sizeof(sbuf)));
+                                str_error_r(errno, sbuf, sizeof(sbuf)));
                        goto out_close_fd;
                }
                for (i = 0; i < ncalls; ++i) {
index 4344fe482c1d2f9a9dec676e236d3200e372e78f..f52239fed361913a219dd563f17ef7b5103fb238 100644 (file)
@@ -6,6 +6,13 @@
 #include "tests.h"
 #include "debug.h"
 
+#ifndef O_DIRECTORY
+#define O_DIRECTORY    00200000
+#endif
+#ifndef AT_FDCWD
+#define AT_FDCWD       -100
+#endif
+
 int test__syscall_openat_tp_fields(int subtest __maybe_unused)
 {
        struct record_opts opts = {
@@ -51,14 +58,14 @@ int test__syscall_openat_tp_fields(int subtest __maybe_unused)
        err = perf_evlist__open(evlist);
        if (err < 0) {
                pr_debug("perf_evlist__open: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
        err = perf_evlist__mmap(evlist, UINT_MAX, false);
        if (err < 0) {
                pr_debug("perf_evlist__mmap: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
index 1184f9ba649927826f3e96cbeb734404e08ae9be..d7414128d7fe40d150dcb2424082dd858f6779d1 100644 (file)
@@ -29,7 +29,7 @@ int test__openat_syscall_event(int subtest __maybe_unused)
        if (perf_evsel__open_per_thread(evsel, threads) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_evsel_delete;
        }
 
index 7865f68dc0d82bea12c960c61792e9fd28857305..20c2e641c42265b1606d751c64ffec6bdf2e5d3c 100644 (file)
@@ -32,7 +32,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
        TEST_ASSERT_VAL("wrong number of groups", 0 == evlist->nr_groups);
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                TEST_ASSERT_VAL("wrong type",
                        PERF_TYPE_TRACEPOINT == evsel->attr.type);
                TEST_ASSERT_VAL("wrong sample_type",
@@ -207,7 +207,7 @@ test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist)
 
        TEST_ASSERT_VAL("wrong number of entries", evlist->nr_entries > 1);
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                TEST_ASSERT_VAL("wrong exclude_user",
                                !evsel->attr.exclude_user);
                TEST_ASSERT_VAL("wrong exclude_kernel",
@@ -1783,8 +1783,8 @@ static int test_pmu_events(void)
                struct evlist_test e;
                char name[MAX_NAME];
 
-               if (!strcmp(ent->d_name, ".") ||
-                   !strcmp(ent->d_name, ".."))
+               /* Names containing . are special and cannot be used directly */
+               if (strchr(ent->d_name, '.'))
                        continue;
 
                snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name);
index 294c76b01b417d004fd20c047b624fe04cf61f65..81c6eeaca0f53ec00d9cabca4d13119856b0c719 100644 (file)
@@ -44,8 +44,7 @@ static int process_events(union perf_event **events, size_t count)
        for (i = 0; i < count && !err; i++)
                err = process_event(&evlist, events[i]);
 
-       if (evlist)
-               perf_evlist__delete(evlist);
+       perf_evlist__delete(evlist);
 
        return err;
 }
index b836ee6a8d9bb6a676f65bcfa5243fef840c73e0..8f2e1de6d0eae93d0bb8de2e12602f9898176136 100644 (file)
@@ -1,3 +1,6 @@
+/* For the CLR_() macros */
+#include <pthread.h>
+
 #include <sched.h>
 #include "evlist.h"
 #include "evsel.h"
@@ -104,7 +107,7 @@ int test__PERF_RECORD(int subtest __maybe_unused)
        err = sched__get_first_possible_cpu(evlist->workload.pid, &cpu_mask);
        if (err < 0) {
                pr_debug("sched__get_first_possible_cpu: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
@@ -115,7 +118,7 @@ int test__PERF_RECORD(int subtest __maybe_unused)
         */
        if (sched_setaffinity(evlist->workload.pid, cpu_mask_size, &cpu_mask) < 0) {
                pr_debug("sched_setaffinity: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
@@ -126,7 +129,7 @@ int test__PERF_RECORD(int subtest __maybe_unused)
        err = perf_evlist__open(evlist);
        if (err < 0) {
                pr_debug("perf_evlist__open: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
@@ -138,7 +141,7 @@ int test__PERF_RECORD(int subtest __maybe_unused)
        err = perf_evlist__mmap(evlist, opts.mmap_pages, false);
        if (err < 0) {
                pr_debug("perf_evlist__mmap: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
diff --git a/tools/perf/tests/sdt.c b/tools/perf/tests/sdt.c
new file mode 100644 (file)
index 0000000..f59d210
--- /dev/null
@@ -0,0 +1,115 @@
+#include <stdio.h>
+#include <sys/epoll.h>
+#include <util/util.h>
+#include <util/evlist.h>
+#include <linux/filter.h>
+#include "tests.h"
+#include "debug.h"
+#include "probe-file.h"
+#include "build-id.h"
+
+/* To test SDT event, we need libelf support to scan elf binary */
+#if defined(HAVE_SDT_EVENT) && defined(HAVE_LIBELF_SUPPORT)
+
+#include <sys/sdt.h>
+
+/*
+ * Embed an SDT marker (sdt_perf:test_target) into the perf binary
+ * itself; the test later scans /proc/self/exe for this probe point.
+ */
+static int target_function(void)
+{
+       DTRACE_PROBE(perf, test_target);
+       return TEST_OK;
+}
+
+/* Copied from builtin-buildid-cache.c */
+/*
+ * Read @filename's build-id and register the file in the build-id
+ * cache (under the directory set via set_buildid_dir()).
+ * Returns 0 on success, negative error otherwise.
+ */
+static int build_id_cache__add_file(const char *filename)
+{
+       char sbuild_id[SBUILD_ID_SIZE];
+       u8 build_id[BUILD_ID_SIZE];
+       int err;
+
+       err = filename__read_build_id(filename, &build_id, sizeof(build_id));
+       if (err < 0) {
+               pr_debug("Failed to read build id of %s\n", filename);
+               return err;
+       }
+
+       /* Convert the raw build-id to its hex string form for the cache. */
+       build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+       err = build_id_cache__add_s(sbuild_id, filename, false, false);
+       if (err < 0)
+               pr_debug("Failed to add build id cache of %s\n", filename);
+       return err;
+}
+
+/*
+ * Return a heap-allocated path to the running perf binary (from
+ * /proc/self/exe), or NULL on failure.  Caller frees the result.
+ */
+static char *get_self_path(void)
+{
+       char *buf = calloc(PATH_MAX, sizeof(char));
+
+       /*
+        * readlink() does not NUL-terminate its output; cap the read at
+        * PATH_MAX - 1 so the zero-filled final byte from calloc() always
+        * terminates the string.
+        */
+       if (buf && readlink("/proc/self/exe", buf, PATH_MAX - 1) < 0) {
+               pr_debug("Failed to get correct path of perf\n");
+               free(buf);
+               return NULL;
+       }
+       return buf;
+}
+
+/*
+ * Open the probe cache for binary @target and verify that the probe
+ * @group:@event was recorded in it.  Returns 0 if found, negative
+ * error otherwise.
+ */
+static int search_cached_probe(const char *target,
+                              const char *group, const char *event)
+{
+       struct probe_cache *cache = probe_cache__new(target);
+       int ret = 0;
+
+       if (!cache) {
+               pr_debug("Failed to open probe cache of %s\n", target);
+               return -EINVAL;
+       }
+
+       if (!probe_cache__find_by_name(cache, group, event)) {
+               pr_debug("Failed to find %s:%s in the cache\n", group, event);
+               ret = -ENOENT;
+       }
+       probe_cache__delete(cache);
+
+       return ret;
+}
+
+/*
+ * SDT event test: scan the perf binary itself into a throw-away
+ * build-id cache and verify the sdt_perf:test_target marker (placed by
+ * target_function()) was discovered and cached.
+ */
+int test__sdt_event(int subtests __maybe_unused)
+{
+       int ret = TEST_FAIL;
+       /* NOTE(review): leading "__" is a reserved identifier prefix. */
+       char __tempdir[] = "./test-buildid-XXXXXX";
+       char *tempdir = NULL, *myself = get_self_path();
+
+       if (myself == NULL || mkdtemp(__tempdir) == NULL) {
+               pr_debug("Failed to make a tempdir for build-id cache\n");
+               goto error;
+       }
+       /* Note that buildid_dir must be an absolute path */
+       tempdir = realpath(__tempdir, NULL);
+       /*
+        * NOTE(review): if realpath() fails, tempdir stays NULL and is
+        * passed to set_buildid_dir()/rm_rf() below — confirm both
+        * tolerate NULL, or add an explicit check.
+        */
+
+       /* At first, scan itself */
+       set_buildid_dir(tempdir);
+       if (build_id_cache__add_file(myself) < 0)
+               goto error_rmdir;
+
+       /* Open a cache and make sure the SDT is stored */
+       if (search_cached_probe(myself, "sdt_perf", "test_target") < 0)
+               goto error_rmdir;
+
+       /* TBD: probing on the SDT event and collect logs */
+
+       /* Call the target and get an event */
+       ret = target_function();
+
+error_rmdir:
+       /* Cleanup temporary buildid dir */
+       rm_rf(tempdir);
+error:
+       free(tempdir);
+       free(myself);
+       return ret;
+}
+#else
+/* Stub used when SDT or libelf support is not compiled in. */
+int test__sdt_event(int subtests __maybe_unused)
+{
+       pr_debug("Skip SDT event test because SDT support is not compiled\n");
+       return TEST_SKIP;
+}
+#endif
index 36e8ce1550e30f3769cf0c32c60439d8aa750f7a..4c9fd046d57b1772422747fbf7deea8be036d68f 100644 (file)
@@ -70,7 +70,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 
                err = -errno;
                pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)),
+                        str_error_r(errno, sbuf, sizeof(sbuf)),
                         knob, (u64)attr.sample_freq);
                goto out_delete_evlist;
        }
@@ -78,7 +78,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
        err = perf_evlist__mmap(evlist, 128, true);
        if (err < 0) {
                pr_debug("failed to mmap event: %d (%s)\n", errno,
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
index 39a689bf7574e13010a59bf8dae7b748aa0b63c8..7ddbe267d0acbae827010e6408bde3faf575003e 100644 (file)
@@ -432,7 +432,7 @@ int test__switch_tracking(int subtest __maybe_unused)
        }
 
        /* Check non-tracking events are not tracking */
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel != tracking_evsel) {
                        if (evsel->attr.mmap || evsel->attr.comm) {
                                pr_debug("Non-tracking event is tracking\n");
index 2dfff7ac8ef31eae49b01b62c024ff4ddcf246dc..01a5ba2788c604c8c6b6c6e9f0a0364444f90da0 100644 (file)
@@ -91,13 +91,13 @@ int test__task_exit(int subtest __maybe_unused)
        err = perf_evlist__open(evlist);
        if (err < 0) {
                pr_debug("Couldn't open the evlist: %s\n",
-                        strerror_r(-err, sbuf, sizeof(sbuf)));
+                        str_error_r(-err, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
        if (perf_evlist__mmap(evlist, 128, true) < 0) {
                pr_debug("failed to mmap events: %d (%s)\n", errno,
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_delete_evlist;
        }
 
index c57e72c826d23588279963553d619565f97bc8be..9bfc0e06c61aaebae51319a896c6ed8a82096445 100644 (file)
@@ -87,6 +87,9 @@ int test__synthesize_stat_round(int subtest);
 int test__event_update(int subtest);
 int test__event_times(int subtest);
 int test__backward_ring_buffer(int subtest);
+int test__cpu_map_print(int subtest);
+int test__sdt_event(int subtest);
+int test__is_printable_array(int subtest);
 
 #if defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
index fccde848fe9c0d0d67f1d6081b7e09ed7fc05610..cee2a2cdc93353fc18915889bd32b9a43cad9c30 100644 (file)
@@ -1,13 +1,20 @@
 #include <sys/types.h>
 #include <unistd.h>
+#include <sys/prctl.h>
 #include "tests.h"
 #include "thread_map.h"
 #include "debug.h"
 
+#define NAME   (const char *) "perf"
+#define NAMEUL (unsigned long) NAME
+
 int test__thread_map(int subtest __maybe_unused)
 {
        struct thread_map *map;
 
+       TEST_ASSERT_VAL("failed to set process name",
+                       !prctl(PR_SET_NAME, NAMEUL, 0, 0, 0));
+
        /* test map on current pid */
        map = thread_map__new_by_pid(getpid());
        TEST_ASSERT_VAL("failed to alloc map", map);
@@ -19,7 +26,7 @@ int test__thread_map(int subtest __maybe_unused)
                        thread_map__pid(map, 0) == getpid());
        TEST_ASSERT_VAL("wrong comm",
                        thread_map__comm(map, 0) &&
-                       !strcmp(thread_map__comm(map, 0), "perf"));
+                       !strcmp(thread_map__comm(map, 0), NAME));
        TEST_ASSERT_VAL("wrong refcnt",
                        atomic_read(&map->refcnt) == 1);
        thread_map__put(map);
@@ -51,7 +58,7 @@ static int process_event(struct perf_tool *tool __maybe_unused,
 
        TEST_ASSERT_VAL("wrong nr",   map->nr == 1);
        TEST_ASSERT_VAL("wrong pid",  map->entries[0].pid == (u64) getpid());
-       TEST_ASSERT_VAL("wrong comm", !strcmp(map->entries[0].comm, "perf"));
+       TEST_ASSERT_VAL("wrong comm", !strcmp(map->entries[0].comm, NAME));
 
        threads = thread_map__new_event(&event->thread_map);
        TEST_ASSERT_VAL("failed to alloc map", threads);
@@ -61,7 +68,7 @@ static int process_event(struct perf_tool *tool __maybe_unused,
                        thread_map__pid(threads, 0) == getpid());
        TEST_ASSERT_VAL("wrong comm",
                        thread_map__comm(threads, 0) &&
-                       !strcmp(thread_map__comm(threads, 0), "perf"));
+                       !strcmp(thread_map__comm(threads, 0), NAME));
        TEST_ASSERT_VAL("wrong refcnt",
                        atomic_read(&threads->refcnt) == 1);
        thread_map__put(threads);
@@ -72,6 +79,9 @@ int test__thread_map_synthesize(int subtest __maybe_unused)
 {
        struct thread_map *threads;
 
+       TEST_ASSERT_VAL("failed to set process name",
+                       !prctl(PR_SET_NAME, NAMEUL, 0, 0, 0));
+
        /* test map on current pid */
        threads = thread_map__new_by_pid(getpid());
        TEST_ASSERT_VAL("failed to alloc map", threads);
index d64f4a9128a1cf1ebaf8495b28e02d07bed6b950..b08f21eb6f4df4eec4e5a2986bba4285a94caf8b 100644 (file)
@@ -1,5 +1,3 @@
-#include <sys/eventfd.h>
-
 #ifndef EFD_SEMAPHORE
 #define EFD_SEMAPHORE          1
 #endif
index 021bb48c63361438f043685c1706834995e8398c..74613703a14eba783f405a3fe655a5c0645d94e2 100644 (file)
@@ -1,3 +1,20 @@
+#include <fcntl.h>
+
+#ifndef LOCK_MAND
+#define LOCK_MAND       32
+#endif
+
+#ifndef LOCK_READ
+#define LOCK_READ       64
+#endif
+
+#ifndef LOCK_WRITE
+#define LOCK_WRITE     128
+#endif
+
+#ifndef LOCK_RW
+#define LOCK_RW                192
+#endif
 
 static size_t syscall_arg__scnprintf_flock(char *bf, size_t size,
                                           struct syscall_arg *arg)
index e2476211f22daf5b469fa2a52834d853f02aead4..bfd3359b09b66b644535a19ccf5500a0df3c4c7a 100644 (file)
@@ -1,5 +1,21 @@
 #include <linux/futex.h>
 
+#ifndef FUTEX_WAIT_BITSET
+#define FUTEX_WAIT_BITSET        9
+#endif
+#ifndef FUTEX_WAKE_BITSET
+#define FUTEX_WAKE_BITSET       10
+#endif
+#ifndef FUTEX_WAIT_REQUEUE_PI
+#define FUTEX_WAIT_REQUEUE_PI   11
+#endif
+#ifndef FUTEX_CMP_REQUEUE_PI
+#define FUTEX_CMP_REQUEUE_PI    12
+#endif
+#ifndef FUTEX_CLOCK_REALTIME
+#define FUTEX_CLOCK_REALTIME   256
+#endif
+
 static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, struct syscall_arg *arg)
 {
        enum syscall_futex_args {
index 3444a4d5382d1fd9ff2fd4f9fb595fed6f60078e..d0a3a8e402e7483c1b4ef81ee27ff9884d629fba 100644 (file)
@@ -1,5 +1,9 @@
 #include <sys/mman.h>
 
+#ifndef PROT_SEM
+#define PROT_SEM 0x8
+#endif
+
 static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
                                               struct syscall_arg *arg)
 {
@@ -16,9 +20,7 @@ static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
        P_MMAP_PROT(EXEC);
        P_MMAP_PROT(READ);
        P_MMAP_PROT(WRITE);
-#ifdef PROT_SEM
        P_MMAP_PROT(SEM);
-#endif
        P_MMAP_PROT(GROWSDOWN);
        P_MMAP_PROT(GROWSUP);
 #undef P_MMAP_PROT
@@ -31,10 +33,31 @@ static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
 
 #define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
 
+#ifndef MAP_FIXED
+#define MAP_FIXED                   0x10
+#endif
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS               0x20
+#endif
+
+#ifndef MAP_32BIT
+#define MAP_32BIT                   0x40
+#endif
+
 #ifndef MAP_STACK
-# define MAP_STACK             0x20000
+#define MAP_STACK                0x20000
 #endif
 
+#ifndef MAP_HUGETLB
+#define MAP_HUGETLB              0x40000
+#endif
+
+#ifndef MAP_UNINITIALIZED
+#define MAP_UNINITIALIZED      0x4000000
+#endif
+
+
 static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
                                                struct syscall_arg *arg)
 {
@@ -48,26 +71,20 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
 
        P_MMAP_FLAG(SHARED);
        P_MMAP_FLAG(PRIVATE);
-#ifdef MAP_32BIT
        P_MMAP_FLAG(32BIT);
-#endif
        P_MMAP_FLAG(ANONYMOUS);
        P_MMAP_FLAG(DENYWRITE);
        P_MMAP_FLAG(EXECUTABLE);
        P_MMAP_FLAG(FILE);
        P_MMAP_FLAG(FIXED);
        P_MMAP_FLAG(GROWSDOWN);
-#ifdef MAP_HUGETLB
        P_MMAP_FLAG(HUGETLB);
-#endif
        P_MMAP_FLAG(LOCKED);
        P_MMAP_FLAG(NONBLOCK);
        P_MMAP_FLAG(NORESERVE);
        P_MMAP_FLAG(POPULATE);
        P_MMAP_FLAG(STACK);
-#ifdef MAP_UNINITIALIZED
        P_MMAP_FLAG(UNINITIALIZED);
-#endif
 #undef P_MMAP_FLAG
 
        if (flags)
@@ -78,6 +95,13 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
 
 #define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
 
+#ifndef MREMAP_MAYMOVE
+#define MREMAP_MAYMOVE 1
+#endif
+#ifndef MREMAP_FIXED
+#define MREMAP_FIXED 2
+#endif
+
 static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
                                                  struct syscall_arg *arg)
 {
@@ -90,9 +114,7 @@ static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
        }
 
        P_MREMAP_FLAG(MAYMOVE);
-#ifdef MREMAP_FIXED
        P_MREMAP_FLAG(FIXED);
-#endif
 #undef P_MREMAP_FLAG
 
        if (flags)
@@ -107,6 +129,10 @@ static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
 #define MADV_HWPOISON          100
 #endif
 
+#ifndef MADV_SOFT_OFFLINE
+#define MADV_SOFT_OFFLINE      101
+#endif
+
 #ifndef MADV_MERGEABLE
 #define MADV_MERGEABLE          12
 #endif
@@ -115,6 +141,23 @@ static size_t syscall_arg__scnprintf_mremap_flags(char *bf, size_t size,
 #define MADV_UNMERGEABLE        13
 #endif
 
+#ifndef MADV_HUGEPAGE
+#define MADV_HUGEPAGE           14
+#endif
+
+#ifndef MADV_NOHUGEPAGE
+#define MADV_NOHUGEPAGE                 15
+#endif
+
+#ifndef MADV_DONTDUMP
+#define MADV_DONTDUMP           16
+#endif
+
+#ifndef MADV_DODUMP
+#define MADV_DODUMP             17
+#endif
+
+
 static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
                                                      struct syscall_arg *arg)
 {
@@ -131,24 +174,14 @@ static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
        P_MADV_BHV(DONTFORK);
        P_MADV_BHV(DOFORK);
        P_MADV_BHV(HWPOISON);
-#ifdef MADV_SOFT_OFFLINE
        P_MADV_BHV(SOFT_OFFLINE);
-#endif
        P_MADV_BHV(MERGEABLE);
        P_MADV_BHV(UNMERGEABLE);
-#ifdef MADV_HUGEPAGE
        P_MADV_BHV(HUGEPAGE);
-#endif
-#ifdef MADV_NOHUGEPAGE
        P_MADV_BHV(NOHUGEPAGE);
-#endif
-#ifdef MADV_DONTDUMP
        P_MADV_BHV(DONTDUMP);
-#endif
-#ifdef MADV_DODUMP
        P_MADV_BHV(DODUMP);
-#endif
-#undef P_MADV_PHV
+#undef P_MADV_BHV
        default: break;
        }
 
index 07fa8a0acad6a5040a6b2c7856d5c922fbc854f3..1106c8960cc4b2209f3750e08fdf69898a824edd 100644 (file)
@@ -33,7 +33,6 @@ static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
        P_MSG_FLAG(OOB);
        P_MSG_FLAG(PEEK);
        P_MSG_FLAG(DONTROUTE);
-       P_MSG_FLAG(TRYHARD);
        P_MSG_FLAG(CTRUNC);
        P_MSG_FLAG(PROBE);
        P_MSG_FLAG(TRUNC);
index 0f3679e0cdcf74ea857e6fe10cb7b110ad1c724f..f55a4597fc3831d9898cd2ac99919ce9e57be840 100644 (file)
@@ -1,3 +1,18 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#ifndef O_DIRECT
+#define O_DIRECT       00040000
+#endif
+
+#ifndef O_DIRECTORY
+#define O_DIRECTORY    00200000
+#endif
+
+#ifndef O_NOATIME
+#define O_NOATIME      01000000
+#endif
 
 static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
                                               struct syscall_arg *arg)
index c205bc608b3cfe02f6cad9ce0a7189d6f54aa6b7..34775295b9b37ae8ddd9976e11c6e7abcf5db9b8 100644 (file)
@@ -9,6 +9,9 @@
 #ifndef SCHED_DEADLINE
 #define SCHED_DEADLINE 6
 #endif
+#ifndef SCHED_RESET_ON_FORK
+#define SCHED_RESET_ON_FORK 0x40000000
+#endif
 
 static size_t syscall_arg__scnprintf_sched_policy(char *bf, size_t size,
                                                  struct syscall_arg *arg)
index 213c5a7e3e92178a667e9df3785fc58b25cdd21b..356441bce27dab5e46219d1f761432538ab3a059 100644 (file)
@@ -1,5 +1,3 @@
-#include <linux/seccomp.h>
-
 #ifndef SECCOMP_SET_MODE_STRICT
 #define SECCOMP_SET_MODE_STRICT 0
 #endif
index af68a9d488bfce964c84e67cf5394e7e13daab29..3eb3edb307a4814d1d11346f94117d3c006e87b7 100644 (file)
@@ -1,5 +1,5 @@
 #include "../util.h"
-#include "../cache.h"
+#include "../config.h"
 #include "../../perf.h"
 #include "libslang.h"
 #include "ui.h"
index 4fc208e82c6fc7b28d99af147d1c75738bc338e6..29dc6d20364e8420d148e3006d50fabeffd13e24 100644 (file)
@@ -8,6 +8,7 @@
 #include "../../util/sort.h"
 #include "../../util/symbol.h"
 #include "../../util/evsel.h"
+#include "../../util/config.h"
 #include <pthread.h>
 
 struct disasm_line_samples {
@@ -222,16 +223,14 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
                        } else if (ins__is_call(dl->ins)) {
                                ui_browser__write_graph(browser, SLSMG_RARROW_CHAR);
                                SLsmg_write_char(' ');
+                       } else if (ins__is_ret(dl->ins)) {
+                               ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
+                               SLsmg_write_char(' ');
                        } else {
                                ui_browser__write_nstring(browser, " ", 2);
                        }
                } else {
-                       if (strcmp(dl->name, "retq")) {
-                               ui_browser__write_nstring(browser, " ", 2);
-                       } else {
-                               ui_browser__write_graph(browser, SLSMG_LARROW_CHAR);
-                               SLsmg_write_char(' ');
-                       }
+                       ui_browser__write_nstring(browser, " ", 2);
                }
 
                disasm_line__scnprintf(dl, bf, sizeof(bf), !annotate_browser__opts.use_offset);
@@ -842,14 +841,14 @@ show_help:
                                ui_helpline__puts("Huh? No selection. Report to linux-kernel@vger.kernel.org");
                        else if (browser->selection->offset == -1)
                                ui_helpline__puts("Actions are only available for assembly lines.");
-                       else if (!browser->selection->ins) {
-                               if (strcmp(browser->selection->name, "retq"))
-                                       goto show_sup_ins;
+                       else if (!browser->selection->ins)
+                               goto show_sup_ins;
+                       else if (ins__is_ret(browser->selection->ins))
                                goto out;
-                       else if (!(annotate_browser__jump(browser) ||
+                       else if (!(annotate_browser__jump(browser) ||
                                     annotate_browser__callq(browser, evsel, hbt))) {
 show_sup_ins:
-                               ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions.");
+                               ui_helpline__puts("Actions are only available for function call/return & jump/branch instructions.");
                        }
                        continue;
                case 't':
index 538bae880bfee592f9ed962a4b99b7a19027c7a8..13d414384739d9673089958aa2daa56de9a5b255 100644 (file)
 #include "../../util/top.h"
 #include "../../arch/common.h"
 
-#include "../browser.h"
+#include "../browsers/hists.h"
 #include "../helpline.h"
 #include "../util.h"
 #include "../ui.h"
 #include "map.h"
 #include "annotate.h"
 
-struct hist_browser {
-       struct ui_browser   b;
-       struct hists        *hists;
-       struct hist_entry   *he_selection;
-       struct map_symbol   *selection;
-       struct hist_browser_timer *hbt;
-       struct pstack       *pstack;
-       struct perf_env *env;
-       int                  print_seq;
-       bool                 show_dso;
-       bool                 show_headers;
-       float                min_pcnt;
-       u64                  nr_non_filtered_entries;
-       u64                  nr_hierarchy_entries;
-       u64                  nr_callchain_rows;
-};
-
 extern void hist_browser__init_hpp(void);
 
-static int hists__browser_title(struct hists *hists,
-                               struct hist_browser_timer *hbt,
-                               char *bf, size_t size);
+static int perf_evsel_browser_title(struct hist_browser *browser,
+                                   char *bf, size_t size);
 static void hist_browser__update_nr_entries(struct hist_browser *hb);
 
 static struct rb_node *hists__filter_entries(struct rb_node *nd,
@@ -585,7 +567,12 @@ static void ui_browser__warn_lost_events(struct ui_browser *browser)
                "Or reduce the sampling frequency.");
 }
 
-static int hist_browser__run(struct hist_browser *browser, const char *help)
+static int hist_browser__title(struct hist_browser *browser, char *bf, size_t size)
+{
+       return browser->title ? browser->title(browser, bf, size) : 0;
+}
+
+int hist_browser__run(struct hist_browser *browser, const char *help)
 {
        int key;
        char title[160];
@@ -595,7 +582,7 @@ static int hist_browser__run(struct hist_browser *browser, const char *help)
        browser->b.entries = &browser->hists->entries;
        browser->b.nr_entries = hist_browser__nr_entries(browser);
 
-       hists__browser_title(browser->hists, hbt, title, sizeof(title));
+       hist_browser__title(browser, title, sizeof(title));
 
        if (ui_browser__show(&browser->b, title, "%s", help) < 0)
                return -1;
@@ -621,8 +608,7 @@ static int hist_browser__run(struct hist_browser *browser, const char *help)
                                ui_browser__warn_lost_events(&browser->b);
                        }
 
-                       hists__browser_title(browser->hists,
-                                            hbt, title, sizeof(title));
+                       hist_browser__title(browser, title, sizeof(title));
                        ui_browser__show_title(&browser->b, title);
                        continue;
                }
@@ -1470,7 +1456,7 @@ static int hist_browser__show_no_entry(struct hist_browser *browser,
                    column++ < browser->b.horiz_scroll)
                        continue;
 
-               ret = fmt->width(fmt, NULL, hists_to_evsel(browser->hists));
+               ret = fmt->width(fmt, NULL, browser->hists);
 
                if (first) {
                        /* for folded sign */
@@ -1531,7 +1517,7 @@ static int hists_browser__scnprintf_headers(struct hist_browser *browser, char *
                if (perf_hpp__should_skip(fmt, hists)  || column++ < browser->b.horiz_scroll)
                        continue;
 
-               ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
+               ret = fmt->header(fmt, &dummy_hpp, hists);
                if (advance_hpp_check(&dummy_hpp, ret))
                        break;
 
@@ -1568,7 +1554,7 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
                if (column++ < browser->b.horiz_scroll)
                        continue;
 
-               ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
+               ret = fmt->header(fmt, &dummy_hpp, hists);
                if (advance_hpp_check(&dummy_hpp, ret))
                        break;
 
@@ -1605,7 +1591,7 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
                        }
                        first_col = false;
 
-                       ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
+                       ret = fmt->header(fmt, &dummy_hpp, hists);
                        dummy_hpp.buf[ret] = '\0';
 
                        start = trim(dummy_hpp.buf);
@@ -1622,21 +1608,38 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
        return ret;
 }
 
-static void hist_browser__show_headers(struct hist_browser *browser)
+static void hists_browser__hierarchy_headers(struct hist_browser *browser)
 {
        char headers[1024];
 
-       if (symbol_conf.report_hierarchy)
-               hists_browser__scnprintf_hierarchy_headers(browser, headers,
-                                                          sizeof(headers));
-       else
-               hists_browser__scnprintf_headers(browser, headers,
-                                                sizeof(headers));
+       hists_browser__scnprintf_hierarchy_headers(browser, headers,
+                                                  sizeof(headers));
+
+       ui_browser__gotorc(&browser->b, 0, 0);
+       ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
+       ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
+}
+
+static void hists_browser__headers(struct hist_browser *browser)
+{
+       char headers[1024];
+
+       hists_browser__scnprintf_headers(browser, headers,
+                                        sizeof(headers));
+
        ui_browser__gotorc(&browser->b, 0, 0);
        ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
        ui_browser__write_nstring(&browser->b, headers, browser->b.width + 1);
 }
 
+static void hist_browser__show_headers(struct hist_browser *browser)
+{
+       if (symbol_conf.report_hierarchy)
+               hists_browser__hierarchy_headers(browser);
+       else
+               hists_browser__headers(browser);
+}
+
 static void ui_browser__hists_init_top(struct ui_browser *browser)
 {
        if (browser->top == NULL) {
@@ -2026,7 +2029,7 @@ static int hist_browser__dump(struct hist_browser *browser)
        fp = fopen(filename, "w");
        if (fp == NULL) {
                char bf[64];
-               const char *err = strerror_r(errno, bf, sizeof(bf));
+               const char *err = str_error_r(errno, bf, sizeof(bf));
                ui_helpline__fpush("Couldn't write to %s: %s", filename, err);
                return -1;
        }
@@ -2039,27 +2042,50 @@ static int hist_browser__dump(struct hist_browser *browser)
        return 0;
 }
 
-static struct hist_browser *hist_browser__new(struct hists *hists,
-                                             struct hist_browser_timer *hbt,
-                                             struct perf_env *env)
+void hist_browser__init(struct hist_browser *browser,
+                       struct hists *hists)
+{
+       struct perf_hpp_fmt *fmt;
+
+       browser->hists                  = hists;
+       browser->b.refresh              = hist_browser__refresh;
+       browser->b.refresh_dimensions   = hist_browser__refresh_dimensions;
+       browser->b.seek                 = ui_browser__hists_seek;
+       browser->b.use_navkeypressed    = true;
+       browser->show_headers           = symbol_conf.show_hist_headers;
+
+       hists__for_each_format(hists, fmt) {
+               perf_hpp__reset_width(fmt, hists);
+               ++browser->b.columns;
+       }
+}
+
+struct hist_browser *hist_browser__new(struct hists *hists)
 {
        struct hist_browser *browser = zalloc(sizeof(*browser));
 
+       if (browser)
+               hist_browser__init(browser, hists);
+
+       return browser;
+}
+
+static struct hist_browser *
+perf_evsel_browser__new(struct perf_evsel *evsel,
+                       struct hist_browser_timer *hbt,
+                       struct perf_env *env)
+{
+       struct hist_browser *browser = hist_browser__new(evsel__hists(evsel));
+
        if (browser) {
-               browser->hists = hists;
-               browser->b.refresh = hist_browser__refresh;
-               browser->b.refresh_dimensions = hist_browser__refresh_dimensions;
-               browser->b.seek = ui_browser__hists_seek;
-               browser->b.use_navkeypressed = true;
-               browser->show_headers = symbol_conf.show_hist_headers;
-               browser->hbt = hbt;
-               browser->env = env;
+               browser->hbt   = hbt;
+               browser->env   = env;
+               browser->title = perf_evsel_browser_title;
        }
-
        return browser;
 }
 
-static void hist_browser__delete(struct hist_browser *browser)
+void hist_browser__delete(struct hist_browser *browser)
 {
        free(browser);
 }
@@ -2080,10 +2106,11 @@ static inline bool is_report_browser(void *timer)
        return timer == NULL;
 }
 
-static int hists__browser_title(struct hists *hists,
-                               struct hist_browser_timer *hbt,
+static int perf_evsel_browser_title(struct hist_browser *browser,
                                char *bf, size_t size)
 {
+       struct hist_browser_timer *hbt = browser->hbt;
+       struct hists *hists = browser->hists;
        char unit;
        int printed;
        const struct dso *dso = hists->dso_filter;
@@ -2640,7 +2667,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                    struct perf_env *env)
 {
        struct hists *hists = evsel__hists(evsel);
-       struct hist_browser *browser = hist_browser__new(hists, hbt, env);
+       struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env);
        struct branch_info *bi;
 #define MAX_OPTIONS  16
        char *options[MAX_OPTIONS];
@@ -2649,7 +2676,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
        int key = -1;
        char buf[64];
        int delay_secs = hbt ? hbt->refresh : 0;
-       struct perf_hpp_fmt *fmt;
 
 #define HIST_BROWSER_HELP_COMMON                                       \
        "h/?/F1        Show this window\n"                              \
@@ -2708,18 +2734,6 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
        memset(options, 0, sizeof(options));
        memset(actions, 0, sizeof(actions));
 
-       hists__for_each_format(browser->hists, fmt) {
-               perf_hpp__reset_width(fmt, hists);
-               /*
-                * This is done just once, and activates the horizontal scrolling
-                * code in the ui_browser code, it would be better to have a the
-                * counter in the perf_hpp code, but I couldn't find doing it here
-                * works, FIXME by setting this in hist_browser__new, for now, be
-                * clever 8-)
-                */
-               ++browser->b.columns;
-       }
-
        if (symbol_conf.col_width_list_str)
                perf_hpp__set_user_width(symbol_conf.col_width_list_str);
 
@@ -3185,7 +3199,7 @@ static int __perf_evlist__tui_browse_hists(struct perf_evlist *evlist,
 
        ui_helpline__push("Press ESC to exit");
 
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                const char *ev_name = perf_evsel__name(pos);
                size_t line_len = strlen(ev_name) + 7;
 
@@ -3216,7 +3230,7 @@ single_entry:
                struct perf_evsel *pos;
 
                nr_entries = 0;
-               evlist__for_each(evlist, pos) {
+               evlist__for_each_entry(evlist, pos) {
                        if (perf_evsel__is_group_leader(pos))
                                nr_entries++;
                }
diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h
new file mode 100644 (file)
index 0000000..39bd0f2
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef _PERF_UI_BROWSER_HISTS_H_
+#define _PERF_UI_BROWSER_HISTS_H_ 1
+
+#include "ui/browser.h"
+
+struct hist_browser {
+       struct ui_browser   b;
+       struct hists        *hists;
+       struct hist_entry   *he_selection;
+       struct map_symbol   *selection;
+       struct hist_browser_timer *hbt;
+       struct pstack       *pstack;
+       struct perf_env     *env;
+       int                  print_seq;
+       bool                 show_dso;
+       bool                 show_headers;
+       float                min_pcnt;
+       u64                  nr_non_filtered_entries;
+       u64                  nr_hierarchy_entries;
+       u64                  nr_callchain_rows;
+
+       /* Get title string. */
+       int                  (*title)(struct hist_browser *browser,
+                            char *bf, size_t size);
+};
+
+struct hist_browser *hist_browser__new(struct hists *hists);
+void hist_browser__delete(struct hist_browser *browser);
+int hist_browser__run(struct hist_browser *browser, const char *help);
+void hist_browser__init(struct hist_browser *browser,
+                       struct hists *hists);
+#endif /* _PERF_UI_BROWSER_HISTS_H_ */
index 932adfaa05af2cf2ea8860f712244d4d647adea3..c5f3677f66797bdbf34c171cfc398009a15b60cd 100644 (file)
@@ -549,7 +549,7 @@ static void perf_gtk__show_hierarchy(GtkWidget *window, struct hists *hists,
                                strcat(buf, "+");
                        first_col = false;
 
-                       fmt->header(fmt, &hpp, hists_to_evsel(hists));
+                       fmt->header(fmt, &hpp, hists);
                        strcat(buf, ltrim(rtrim(hpp.buf)));
                }
        }
@@ -627,7 +627,7 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist,
 
        gtk_container_add(GTK_CONTAINER(window), vbox);
 
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                struct hists *hists = evsel__hists(pos);
                const char *evname = perf_evsel__name(pos);
                GtkWidget *scrolled_window;
index 52e7fc48af9f488088785b8e216e05f6b22546fc..00b91921edb1ff9fbb25b8b606f533b30b632762 100644 (file)
@@ -1,4 +1,5 @@
 #include "../util.h"
+#include "../../util/util.h"
 #include "../../util/debug.h"
 #include "gtk.h"
 
index 700fb3cfa1c739128bc0c8009039f48e1f964c93..5b74a7eba210b20cb693dbedaee482508adcd2cd 100644 (file)
@@ -5,6 +5,7 @@
 #include "../debug.h"
 #include "helpline.h"
 #include "ui.h"
+#include "../util.h"
 
 char ui_helpline__current[512];
 
index af07ffb129ca5ba24b2459fee6ee659d8c71e97f..4274969ddc89471e962dcf0097314b73963c65be 100644 (file)
@@ -215,9 +215,10 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
 
 static int hpp__width_fn(struct perf_hpp_fmt *fmt,
                         struct perf_hpp *hpp __maybe_unused,
-                        struct perf_evsel *evsel)
+                        struct hists *hists)
 {
        int len = fmt->user_len ?: fmt->len;
+       struct perf_evsel *evsel = hists_to_evsel(hists);
 
        if (symbol_conf.event_group)
                len = max(len, evsel->nr_members * fmt->len);
@@ -229,9 +230,9 @@ static int hpp__width_fn(struct perf_hpp_fmt *fmt,
 }
 
 static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
-                         struct perf_evsel *evsel)
+                         struct hists *hists)
 {
-       int len = hpp__width_fn(fmt, hpp, evsel);
+       int len = hpp__width_fn(fmt, hpp, hists);
        return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
 }
 
@@ -632,7 +633,7 @@ unsigned int hists__sort_list_width(struct hists *hists)
                else
                        ret += 2;
 
-               ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
+               ret += fmt->width(fmt, &dummy_hpp, hists);
        }
 
        if (verbose && hists__has(hists, sym)) /* Addr + origin */
@@ -657,7 +658,7 @@ unsigned int hists__overhead_width(struct hists *hists)
                else
                        ret += 2;
 
-               ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
+               ret += fmt->width(fmt, &dummy_hpp, hists);
        }
 
        return ret;
@@ -765,7 +766,7 @@ int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
        if (!symbol_conf.report_hierarchy)
                return 0;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                hists = evsel__hists(evsel);
 
                perf_hpp_list__for_each_sort_list(list, fmt) {
index ba51fa8a1176d739593a7472ad2e028cd2f6eb91..1f6b0994f4f4190c8498292771f4ecc3bb09c277 100644 (file)
@@ -60,6 +60,13 @@ static inline int setup_gtk_browser(void) { return -1; }
 static inline void exit_gtk_browser(bool wait_for_ok __maybe_unused) {}
 #endif
 
+int stdio__config_color(const struct option *opt __maybe_unused,
+                       const char *mode, int unset __maybe_unused)
+{
+       perf_use_color_default = perf_config_colorbool("color.ui", mode, -1);
+       return 0;
+}
+
 void setup_browser(bool fallback_to_pager)
 {
        if (use_browser < 2 && (!isatty(1) || dump_trace))
index 560eb47d56f945acbbdc188d6a8fb3e407faf41e..f04a6311207935fc022ca0b9557db8639d6c00a9 100644 (file)
@@ -492,14 +492,15 @@ out:
 }
 
 static int hist_entry__fprintf(struct hist_entry *he, size_t size,
-                              struct hists *hists,
-                              char *bf, size_t bfsz, FILE *fp)
+                              char *bf, size_t bfsz, FILE *fp,
+                              bool use_callchain)
 {
        int ret;
        struct perf_hpp hpp = {
                .buf            = bf,
                .size           = size,
        };
+       struct hists *hists = he->hists;
        u64 total_period = hists->stats.total_period;
 
        if (size == 0 || size > bfsz)
@@ -512,7 +513,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
 
        ret = fprintf(fp, "%s\n", bf);
 
-       if (symbol_conf.use_callchain)
+       if (use_callchain)
                ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);
 
        return ret;
@@ -548,7 +549,7 @@ static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
                                    struct perf_hpp_list_node, list);
 
        perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
-               fmt->header(fmt, hpp, hists_to_evsel(hists));
+               fmt->header(fmt, hpp, hists);
                fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
        }
 
@@ -568,7 +569,7 @@ static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
                                header_width += fprintf(fp, "+");
                        first_col = false;
 
-                       fmt->header(fmt, hpp, hists_to_evsel(hists));
+                       fmt->header(fmt, hpp, hists);
 
                        header_width += fprintf(fp, "%s", trim(hpp->buf));
                }
@@ -589,7 +590,7 @@ static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
                        fprintf(fp, "%s", sep ?: "..");
                first_col = false;
 
-               width = fmt->width(fmt, hpp, hists_to_evsel(hists));
+               width = fmt->width(fmt, hpp, hists);
                fprintf(fp, "%.*s", width, dots);
        }
 
@@ -606,7 +607,7 @@ static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
                                width++;  /* for '+' sign between column header */
                        first_col = false;
 
-                       width += fmt->width(fmt, hpp, hists_to_evsel(hists));
+                       width += fmt->width(fmt, hpp, hists);
                }
 
                if (width > header_width)
@@ -622,47 +623,31 @@ static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
        return 2;
 }
 
-size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
-                     int max_cols, float min_pcnt, FILE *fp)
+static int
+hists__fprintf_hierarchy_headers(struct hists *hists,
+                                struct perf_hpp *hpp,
+                                FILE *fp)
 {
-       struct perf_hpp_fmt *fmt;
        struct perf_hpp_list_node *fmt_node;
-       struct rb_node *nd;
-       size_t ret = 0;
-       unsigned int width;
-       const char *sep = symbol_conf.field_sep;
-       int nr_rows = 0;
-       char bf[96];
-       struct perf_hpp dummy_hpp = {
-               .buf    = bf,
-               .size   = sizeof(bf),
-       };
-       bool first = true;
-       size_t linesz;
-       char *line = NULL;
-       unsigned indent;
-
-       init_rem_hits();
-
-       hists__for_each_format(hists, fmt)
-               perf_hpp__reset_width(fmt, hists);
-
-       if (symbol_conf.col_width_list_str)
-               perf_hpp__set_user_width(symbol_conf.col_width_list_str);
+       struct perf_hpp_fmt *fmt;
 
-       if (!show_header)
-               goto print_entries;
+       list_for_each_entry(fmt_node, &hists->hpp_formats, list) {
+               perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
+                       perf_hpp__reset_width(fmt, hists);
+       }
 
-       fprintf(fp, "# ");
+       return print_hierarchy_header(hists, hpp, symbol_conf.field_sep, fp);
+}
 
-       if (symbol_conf.report_hierarchy) {
-               list_for_each_entry(fmt_node, &hists->hpp_formats, list) {
-                       perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
-                               perf_hpp__reset_width(fmt, hists);
-               }
-               nr_rows += print_hierarchy_header(hists, &dummy_hpp, sep, fp);
-               goto print_entries;
-       }
+static int
+hists__fprintf_standard_headers(struct hists *hists,
+                               struct perf_hpp *hpp,
+                               FILE *fp)
+{
+       struct perf_hpp_fmt *fmt;
+       unsigned int width;
+       const char *sep = symbol_conf.field_sep;
+       bool first = true;
 
        hists__for_each_format(hists, fmt) {
                if (perf_hpp__should_skip(fmt, hists))
@@ -673,16 +658,14 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                else
                        first = false;
 
-               fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
-               fprintf(fp, "%s", bf);
+               fmt->header(fmt, hpp, hists);
+               fprintf(fp, "%s", hpp->buf);
        }
 
        fprintf(fp, "\n");
-       if (max_rows && ++nr_rows >= max_rows)
-               goto out;
 
        if (sep)
-               goto print_entries;
+               return 1;
 
        first = true;
 
@@ -699,20 +682,60 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
                else
                        first = false;
 
-               width = fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
+               width = fmt->width(fmt, hpp, hists);
                for (i = 0; i < width; i++)
                        fprintf(fp, ".");
        }
 
        fprintf(fp, "\n");
-       if (max_rows && ++nr_rows >= max_rows)
-               goto out;
-
        fprintf(fp, "#\n");
-       if (max_rows && ++nr_rows >= max_rows)
+       return 3;
+}
+
+static int hists__fprintf_headers(struct hists *hists, FILE *fp)
+{
+       char bf[96];
+       struct perf_hpp dummy_hpp = {
+               .buf    = bf,
+               .size   = sizeof(bf),
+       };
+
+       fprintf(fp, "# ");
+
+       if (symbol_conf.report_hierarchy)
+               return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
+       else
+               return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
+
+}
+
+size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
+                     int max_cols, float min_pcnt, FILE *fp,
+                     bool use_callchain)
+{
+       struct perf_hpp_fmt *fmt;
+       struct rb_node *nd;
+       size_t ret = 0;
+       const char *sep = symbol_conf.field_sep;
+       int nr_rows = 0;
+       size_t linesz;
+       char *line = NULL;
+       unsigned indent;
+
+       init_rem_hits();
+
+       hists__for_each_format(hists, fmt)
+               perf_hpp__reset_width(fmt, hists);
+
+       if (symbol_conf.col_width_list_str)
+               perf_hpp__set_user_width(symbol_conf.col_width_list_str);
+
+       if (show_header)
+               nr_rows += hists__fprintf_headers(hists, fp);
+
+       if (max_rows && nr_rows >= max_rows)
                goto out;
 
-print_entries:
        linesz = hists__sort_list_width(hists) + 3 + 1;
        linesz += perf_hpp__color_overhead();
        line = malloc(linesz);
@@ -734,7 +757,7 @@ print_entries:
                if (percent < min_pcnt)
                        continue;
 
-               ret += hist_entry__fprintf(h, max_cols, hists, line, linesz, fp);
+               ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);
 
                if (max_rows && ++nr_rows >= max_rows)
                        break;
index 7dfeba0a91f37c33c87b826e9ed79ec4ad59869c..4ea2ba861fc2fe6624fff3b67e2c0663015ff91d 100644 (file)
@@ -1,3 +1,4 @@
+#include <errno.h>
 #include <signal.h>
 #include <stdbool.h>
 #ifdef HAVE_BACKTRACE_SUPPORT
@@ -6,6 +7,7 @@
 
 #include "../../util/cache.h"
 #include "../../util/debug.h"
+#include "../../util/util.h"
 #include "../browser.h"
 #include "../helpline.h"
 #include "../ui.h"
index ab88383f8be85d5278dfaba5b6b6b07b11a6b2d2..4b6fb6c7a542c5c5afadd48c0ebb571c0ad420e0 100644 (file)
@@ -26,4 +26,8 @@ static inline void ui__exit(bool wait_for_ok __maybe_unused) {}
 
 void ui__refresh_dimensions(bool force);
 
+struct option;
+
+int stdio__config_color(const struct option *opt, const char *mode, int unset);
+
 #endif /* _PERF_UI_H_ */
index 8c6c8a0ca642133af03e837c768703a0ec0266a0..2fa7d8b6987314b0e6b1724eef627e22768b380a 100644 (file)
@@ -70,6 +70,7 @@ libperf-y += stat.o
 libperf-y += stat-shadow.o
 libperf-y += record.o
 libperf-y += srcline.o
+libperf-y += str_error_r.o
 libperf-y += data.o
 libperf-y += tsc.o
 libperf-y += cloexec.o
@@ -84,6 +85,7 @@ libperf-y += parse-regs-options.o
 libperf-y += term.o
 libperf-y += help-unknown-cmd.o
 libperf-y += mem-events.o
+libperf-y += vsprintf.o
 
 libperf-$(CONFIG_LIBBPF) += bpf-loader.o
 libperf-$(CONFIG_BPF_PROLOGUE) += bpf-prologue.o
@@ -99,7 +101,10 @@ libperf-$(CONFIG_DWARF) += probe-finder.o
 libperf-$(CONFIG_DWARF) += dwarf-aux.o
 
 libperf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+libperf-$(CONFIG_LOCAL_LIBUNWIND)    += unwind-libunwind-local.o
 libperf-$(CONFIG_LIBUNWIND)          += unwind-libunwind.o
+libperf-$(CONFIG_LIBUNWIND_X86)      += libunwind/x86_32.o
+libperf-$(CONFIG_LIBUNWIND_AARCH64)  += libunwind/arm64.o
 
 libperf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
 
@@ -108,6 +113,7 @@ libperf-y += scripting-engines/
 libperf-$(CONFIG_ZLIB) += zlib.o
 libperf-$(CONFIG_LZMA) += lzma.o
 libperf-y += demangle-java.o
+libperf-y += demangle-rust.o
 
 ifdef CONFIG_JITDUMP
 libperf-$(CONFIG_LIBELF) += jitdump.o
@@ -170,6 +176,14 @@ $(OUTPUT)util/libstring.o: ../lib/string.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
 
+$(OUTPUT)util/str_error_r.o: ../lib/str_error_r.c FORCE
+       $(call rule_mkdir)
+       $(call if_changed_dep,cc_o_c)
+
 $(OUTPUT)util/hweight.o: ../lib/hweight.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)util/vsprintf.o: ../lib/vsprintf.c FORCE
+       $(call rule_mkdir)
+       $(call if_changed_dep,cc_o_c)
index c0b43ee40d95674fb5a5602c6f287ba2d8d8c5f5..6455471d9cd1e6dc8386919161a5255ec040f783 100644 (file)
@@ -1,4 +1,6 @@
 #include "cache.h"
+#include "util.h"
+#include "config.h"
 
 static const char *alias_key;
 static char *alias_val;
index 7e5a1e8874cee4a263d2577f55fd3ddf7dde3a95..e9825fe825fd65088ef807a3b54ca4a4a198ead7 100644 (file)
@@ -354,6 +354,15 @@ static struct ins_ops nop_ops = {
        .scnprintf = nop__scnprintf,
 };
 
+static struct ins_ops ret_ops = {
+       .scnprintf = ins__raw_scnprintf,
+};
+
+bool ins__is_ret(const struct ins *ins)
+{
+       return ins->ops == &ret_ops;
+}
+
 static struct ins instructions[] = {
        { .name = "add",   .ops  = &mov_ops, },
        { .name = "addl",  .ops  = &mov_ops, },
@@ -444,6 +453,7 @@ static struct ins instructions[] = {
        { .name = "xadd",  .ops  = &mov_ops, },
        { .name = "xbeginl", .ops  = &jump_ops, },
        { .name = "xbeginq", .ops  = &jump_ops, },
+       { .name = "retq",  .ops  = &ret_ops, },
 };
 
 static int ins__key_cmp(const void *name, const void *insp)
@@ -1512,13 +1522,14 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
        const char *d_filename;
        const char *evsel_name = perf_evsel__name(evsel);
        struct annotation *notes = symbol__annotation(sym);
+       struct sym_hist *h = annotation__histogram(notes, evsel->idx);
        struct disasm_line *pos, *queue = NULL;
        u64 start = map__rip_2objdump(map, sym->start);
        int printed = 2, queue_len = 0;
        int more = 0;
        u64 len;
        int width = 8;
-       int namelen, evsel_name_len, graph_dotted_len;
+       int graph_dotted_len;
 
        filename = strdup(dso->long_name);
        if (!filename)
@@ -1530,17 +1541,14 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map,
                d_filename = basename(filename);
 
        len = symbol__size(sym);
-       namelen = strlen(d_filename);
-       evsel_name_len = strlen(evsel_name);
 
        if (perf_evsel__is_group_event(evsel))
                width *= evsel->nr_members;
 
-       printf(" %-*.*s|        Source code & Disassembly of %s for %s\n",
-              width, width, "Percent", d_filename, evsel_name);
+       graph_dotted_len = printf(" %-*.*s|     Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n",
+              width, width, "Percent", d_filename, evsel_name, h->sum);
 
-       graph_dotted_len = width + namelen + evsel_name_len;
-       printf("-%-*.*s-----------------------------------------\n",
+       printf("%-*.*s----\n",
               graph_dotted_len, graph_dotted_len, graph_dotted_line);
 
        if (verbose)
@@ -1676,11 +1684,6 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map,
        return 0;
 }
 
-int hist_entry__annotate(struct hist_entry *he, size_t privsize)
-{
-       return symbol__annotate(he->ms.sym, he->ms.map, privsize);
-}
-
 bool ui__has_annotation(void)
 {
        return use_browser == 1 && perf_hpp_list.sym;
index 9241f8c2b7e16e10f222e5d5fc0be5032ba7f4f1..a23084f54128ee10b55d959bff99c66c3e0fecba 100644 (file)
@@ -48,6 +48,7 @@ struct ins {
 
 bool ins__is_jump(const struct ins *ins);
 bool ins__is_call(const struct ins *ins);
+bool ins__is_ret(const struct ins *ins);
 int ins__scnprintf(struct ins *ins, char *bf, size_t size, struct ins_operands *ops);
 
 struct annotation;
@@ -156,8 +157,6 @@ void symbol__annotate_zero_histograms(struct symbol *sym);
 
 int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize);
 
-int hist_entry__annotate(struct hist_entry *he, size_t privsize);
-
 int symbol__annotate_init(struct map *map, struct symbol *sym);
 int symbol__annotate_printf(struct symbol *sym, struct map *map,
                            struct perf_evsel *evsel, bool full_paths,
index 767989e0e3126714fd956df13df7709098aadc5c..ac5f0d7167e657e505615736cd1875d3226d1317 100644 (file)
@@ -63,6 +63,7 @@ enum itrace_period_type {
  * @calls: limit branch samples to calls (can be combined with @returns)
  * @returns: limit branch samples to returns (can be combined with @calls)
  * @callchain: add callchain to 'instructions' events
+ * @thread_stack: feed branches to the thread_stack
  * @last_branch: add branch context to 'instruction' events
  * @callchain_sz: maximum callchain size
  * @last_branch_sz: branch context size
@@ -82,6 +83,7 @@ struct itrace_synth_opts {
        bool                    calls;
        bool                    returns;
        bool                    callchain;
+       bool                    thread_stack;
        bool                    last_branch;
        unsigned int            callchain_sz;
        unsigned int            last_branch_sz;
index 493307d1414ced463a935ae30ea00bc85c3585e8..1f12e4e4000605e68351194731969df0c9081005 100644 (file)
@@ -37,6 +37,9 @@ DEFINE_PRINT_FN(info, 1)
 DEFINE_PRINT_FN(debug, 1)
 
 struct bpf_prog_priv {
+       bool is_tp;
+       char *sys_name;
+       char *evt_name;
        struct perf_probe_event pev;
        bool need_prologue;
        struct bpf_insn *insns_buf;
@@ -118,6 +121,8 @@ clear_prog_priv(struct bpf_program *prog __maybe_unused,
        cleanup_perf_probe_events(&priv->pev, 1);
        zfree(&priv->insns_buf);
        zfree(&priv->type_mapping);
+       zfree(&priv->sys_name);
+       zfree(&priv->evt_name);
        free(priv);
 }
 
@@ -269,7 +274,8 @@ nextline:
 }
 
 static int
-parse_prog_config(const char *config_str, struct perf_probe_event *pev)
+parse_prog_config(const char *config_str, const char **p_main_str,
+                 bool *is_tp, struct perf_probe_event *pev)
 {
        int err;
        const char *main_str = parse_prog_config_kvpair(config_str, pev);
@@ -277,6 +283,22 @@ parse_prog_config(const char *config_str, struct perf_probe_event *pev)
        if (IS_ERR(main_str))
                return PTR_ERR(main_str);
 
+       *p_main_str = main_str;
+       if (!strchr(main_str, '=')) {
+               /* Is a tracepoint event? */
+               const char *s = strchr(main_str, ':');
+
+               if (!s) {
+                       pr_debug("bpf: '%s' is not a valid tracepoint\n",
+                                config_str);
+                       return -BPF_LOADER_ERRNO__CONFIG;
+               }
+
+               *is_tp = true;
+               return 0;
+       }
+
+       *is_tp = false;
        err = parse_perf_probe_command(main_str, pev);
        if (err < 0) {
                pr_debug("bpf: '%s' is not a valid config string\n",
@@ -292,7 +314,8 @@ config_bpf_program(struct bpf_program *prog)
 {
        struct perf_probe_event *pev = NULL;
        struct bpf_prog_priv *priv = NULL;
-       const char *config_str;
+       const char *config_str, *main_str;
+       bool is_tp = false;
        int err;
 
        /* Initialize per-program probing setting */
@@ -313,10 +336,19 @@ config_bpf_program(struct bpf_program *prog)
        pev = &priv->pev;
 
        pr_debug("bpf: config program '%s'\n", config_str);
-       err = parse_prog_config(config_str, pev);
+       err = parse_prog_config(config_str, &main_str, &is_tp, pev);
        if (err)
                goto errout;
 
+       if (is_tp) {
+               char *s = strchr(main_str, ':');
+
+               priv->is_tp = true;
+               priv->sys_name = strndup(main_str, s - main_str);
+               priv->evt_name = strdup(s + 1);
+               goto set_priv;
+       }
+
        if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
                pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
                         config_str, PERF_BPF_PROBE_GROUP);
@@ -339,7 +371,8 @@ config_bpf_program(struct bpf_program *prog)
        }
        pr_debug("bpf: config '%s' is ok\n", config_str);
 
-       err = bpf_program__set_private(prog, priv, clear_prog_priv);
+set_priv:
+       err = bpf_program__set_priv(prog, priv, clear_prog_priv);
        if (err) {
                pr_debug("Failed to set priv for program '%s'\n", config_str);
                goto errout;
@@ -380,15 +413,14 @@ preproc_gen_prologue(struct bpf_program *prog, int n,
                     struct bpf_insn *orig_insns, int orig_insns_cnt,
                     struct bpf_prog_prep_result *res)
 {
+       struct bpf_prog_priv *priv = bpf_program__priv(prog);
        struct probe_trace_event *tev;
        struct perf_probe_event *pev;
-       struct bpf_prog_priv *priv;
        struct bpf_insn *buf;
        size_t prologue_cnt = 0;
        int i, err;
 
-       err = bpf_program__get_private(prog, (void **)&priv);
-       if (err || !priv)
+       if (IS_ERR(priv) || !priv || priv->is_tp)
                goto errout;
 
        pev = &priv->pev;
@@ -535,17 +567,21 @@ static int map_prologue(struct perf_probe_event *pev, int *mapping,
 
 static int hook_load_preprocessor(struct bpf_program *prog)
 {
+       struct bpf_prog_priv *priv = bpf_program__priv(prog);
        struct perf_probe_event *pev;
-       struct bpf_prog_priv *priv;
        bool need_prologue = false;
        int err, i;
 
-       err = bpf_program__get_private(prog, (void **)&priv);
-       if (err || !priv) {
+       if (IS_ERR(priv) || !priv) {
                pr_debug("Internal error when hook preprocessor\n");
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
 
+       if (priv->is_tp) {
+               priv->need_prologue = false;
+               return 0;
+       }
+
        pev = &priv->pev;
        for (i = 0; i < pev->ntevs; i++) {
                struct probe_trace_event *tev = &pev->tevs[i];
@@ -607,9 +643,18 @@ int bpf__probe(struct bpf_object *obj)
                if (err)
                        goto out;
 
-               err = bpf_program__get_private(prog, (void **)&priv);
-               if (err || !priv)
+               priv = bpf_program__priv(prog);
+               if (IS_ERR(priv) || !priv) {
+                       err = PTR_ERR(priv);
                        goto out;
+               }
+
+               if (priv->is_tp) {
+                       bpf_program__set_tracepoint(prog);
+                       continue;
+               }
+
+               bpf_program__set_kprobe(prog);
                pev = &priv->pev;
 
                err = convert_perf_probe_events(pev, 1);
@@ -645,13 +690,12 @@ int bpf__unprobe(struct bpf_object *obj)
 {
        int err, ret = 0;
        struct bpf_program *prog;
-       struct bpf_prog_priv *priv;
 
        bpf_object__for_each_program(prog, obj) {
+               struct bpf_prog_priv *priv = bpf_program__priv(prog);
                int i;
 
-               err = bpf_program__get_private(prog, (void **)&priv);
-               if (err || !priv)
+               if (IS_ERR(priv) || !priv || priv->is_tp)
                        continue;
 
                for (i = 0; i < priv->pev.ntevs; i++) {
@@ -694,26 +738,34 @@ int bpf__load(struct bpf_object *obj)
        return 0;
 }
 
-int bpf__foreach_tev(struct bpf_object *obj,
-                    bpf_prog_iter_callback_t func,
-                    void *arg)
+int bpf__foreach_event(struct bpf_object *obj,
+                      bpf_prog_iter_callback_t func,
+                      void *arg)
 {
        struct bpf_program *prog;
        int err;
 
        bpf_object__for_each_program(prog, obj) {
+               struct bpf_prog_priv *priv = bpf_program__priv(prog);
                struct probe_trace_event *tev;
                struct perf_probe_event *pev;
-               struct bpf_prog_priv *priv;
                int i, fd;
 
-               err = bpf_program__get_private(prog,
-                               (void **)&priv);
-               if (err || !priv) {
+               if (IS_ERR(priv) || !priv) {
                        pr_debug("bpf: failed to get private field\n");
                        return -BPF_LOADER_ERRNO__INTERNAL;
                }
 
+               if (priv->is_tp) {
+                       fd = bpf_program__fd(prog);
+                       err = (*func)(priv->sys_name, priv->evt_name, fd, arg);
+                       if (err) {
+                               pr_debug("bpf: tracepoint call back failed, stop iterate\n");
+                               return err;
+                       }
+                       continue;
+               }
+
                pev = &priv->pev;
                for (i = 0; i < pev->ntevs; i++) {
                        tev = &pev->tevs[i];
@@ -731,7 +783,7 @@ int bpf__foreach_tev(struct bpf_object *obj,
                                return fd;
                        }
 
-                       err = (*func)(tev, fd, arg);
+                       err = (*func)(tev->group, tev->event, fd, arg);
                        if (err) {
                                pr_debug("bpf: call back failed, stop iterate\n");
                                return err;
@@ -897,15 +949,12 @@ bpf_map_priv__clone(struct bpf_map_priv *priv)
 static int
 bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
 {
-       struct bpf_map_priv *priv;
-       const char *map_name;
-       int err;
+       const char *map_name = bpf_map__name(map);
+       struct bpf_map_priv *priv = bpf_map__priv(map);
 
-       map_name = bpf_map__get_name(map);
-       err = bpf_map__get_private(map, (void **)&priv);
-       if (err) {
+       if (IS_ERR(priv)) {
                pr_debug("Failed to get private from map %s\n", map_name);
-               return err;
+               return PTR_ERR(priv);
        }
 
        if (!priv) {
@@ -916,7 +965,7 @@ bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
                }
                INIT_LIST_HEAD(&priv->ops_list);
 
-               if (bpf_map__set_private(map, priv, bpf_map_priv__clear)) {
+               if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
                        free(priv);
                        return -BPF_LOADER_ERRNO__INTERNAL;
                }
@@ -948,30 +997,26 @@ static int
 __bpf_map__config_value(struct bpf_map *map,
                        struct parse_events_term *term)
 {
-       struct bpf_map_def def;
        struct bpf_map_op *op;
-       const char *map_name;
-       int err;
+       const char *map_name = bpf_map__name(map);
+       const struct bpf_map_def *def = bpf_map__def(map);
 
-       map_name = bpf_map__get_name(map);
-
-       err = bpf_map__get_def(map, &def);
-       if (err) {
+       if (IS_ERR(def)) {
                pr_debug("Unable to get map definition from '%s'\n",
                         map_name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
 
-       if (def.type != BPF_MAP_TYPE_ARRAY) {
+       if (def->type != BPF_MAP_TYPE_ARRAY) {
                pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
                         map_name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
        }
-       if (def.key_size < sizeof(unsigned int)) {
+       if (def->key_size < sizeof(unsigned int)) {
                pr_debug("Map %s has incorrect key size\n", map_name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
        }
-       switch (def.value_size) {
+       switch (def->value_size) {
        case 1:
        case 2:
        case 4:
@@ -1014,12 +1059,10 @@ __bpf_map__config_event(struct bpf_map *map,
                        struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
-       struct bpf_map_def def;
+       const struct bpf_map_def *def;
        struct bpf_map_op *op;
-       const char *map_name;
-       int err;
+       const char *map_name = bpf_map__name(map);
 
-       map_name = bpf_map__get_name(map);
        evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
        if (!evsel) {
                pr_debug("Event (for '%s') '%s' doesn't exist\n",
@@ -1027,18 +1070,18 @@ __bpf_map__config_event(struct bpf_map *map,
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
        }
 
-       err = bpf_map__get_def(map, &def);
-       if (err) {
+       def = bpf_map__def(map);
+       if (IS_ERR(def)) {
                pr_debug("Unable to get map definition from '%s'\n",
                         map_name);
-               return err;
+               return PTR_ERR(def);
        }
 
        /*
         * No need to check key_size and value_size:
         * kernel has already checked them.
         */
-       if (def.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+       if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
                pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
                         map_name);
                return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
@@ -1087,9 +1130,8 @@ config_map_indices_range_check(struct parse_events_term *term,
                               const char *map_name)
 {
        struct parse_events_array *array = &term->array;
-       struct bpf_map_def def;
+       const struct bpf_map_def *def;
        unsigned int i;
-       int err;
 
        if (!array->nr_ranges)
                return 0;
@@ -1099,8 +1141,8 @@ config_map_indices_range_check(struct parse_events_term *term,
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
 
-       err = bpf_map__get_def(map, &def);
-       if (err) {
+       def = bpf_map__def(map);
+       if (IS_ERR(def)) {
                pr_debug("ERROR: Unable to get map definition from '%s'\n",
                         map_name);
                return -BPF_LOADER_ERRNO__INTERNAL;
@@ -1111,7 +1153,7 @@ config_map_indices_range_check(struct parse_events_term *term,
                size_t length = array->ranges[i].length;
                unsigned int idx = start + length - 1;
 
-               if (idx >= def.max_entries) {
+               if (idx >= def->max_entries) {
                        pr_debug("ERROR: index %d too large\n", idx);
                        return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
                }
@@ -1147,7 +1189,7 @@ bpf__obj_config_map(struct bpf_object *obj,
                goto out;
        }
 
-       map = bpf_object__get_map_by_name(obj, map_name);
+       map = bpf_object__find_map_by_name(obj, map_name);
        if (!map) {
                pr_debug("ERROR: Map %s doesn't exist\n", map_name);
                err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
@@ -1204,14 +1246,14 @@ out:
 }
 
 typedef int (*map_config_func_t)(const char *name, int map_fd,
-                                struct bpf_map_def *pdef,
+                                const struct bpf_map_def *pdef,
                                 struct bpf_map_op *op,
                                 void *pkey, void *arg);
 
 static int
 foreach_key_array_all(map_config_func_t func,
                      void *arg, const char *name,
-                     int map_fd, struct bpf_map_def *pdef,
+                     int map_fd, const struct bpf_map_def *pdef,
                      struct bpf_map_op *op)
 {
        unsigned int i;
@@ -1231,7 +1273,7 @@ foreach_key_array_all(map_config_func_t func,
 static int
 foreach_key_array_ranges(map_config_func_t func, void *arg,
                         const char *name, int map_fd,
-                        struct bpf_map_def *pdef,
+                        const struct bpf_map_def *pdef,
                         struct bpf_map_op *op)
 {
        unsigned int i, j;
@@ -1261,15 +1303,12 @@ bpf_map_config_foreach_key(struct bpf_map *map,
                           void *arg)
 {
        int err, map_fd;
-       const char *name;
        struct bpf_map_op *op;
-       struct bpf_map_def def;
-       struct bpf_map_priv *priv;
+       const struct bpf_map_def *def;
+       const char *name = bpf_map__name(map);
+       struct bpf_map_priv *priv = bpf_map__priv(map);
 
-       name = bpf_map__get_name(map);
-
-       err = bpf_map__get_private(map, (void **)&priv);
-       if (err) {
+       if (IS_ERR(priv)) {
                pr_debug("ERROR: failed to get private from map %s\n", name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
@@ -1278,29 +1317,29 @@ bpf_map_config_foreach_key(struct bpf_map *map,
                return 0;
        }
 
-       err = bpf_map__get_def(map, &def);
-       if (err) {
+       def = bpf_map__def(map);
+       if (IS_ERR(def)) {
                pr_debug("ERROR: failed to get definition from map %s\n", name);
                return -BPF_LOADER_ERRNO__INTERNAL;
        }
-       map_fd = bpf_map__get_fd(map);
+       map_fd = bpf_map__fd(map);
        if (map_fd < 0) {
                pr_debug("ERROR: failed to get fd from map %s\n", name);
                return map_fd;
        }
 
        list_for_each_entry(op, &priv->ops_list, list) {
-               switch (def.type) {
+               switch (def->type) {
                case BPF_MAP_TYPE_ARRAY:
                case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
                        switch (op->key_type) {
                        case BPF_MAP_KEY_ALL:
                                err = foreach_key_array_all(func, arg, name,
-                                                           map_fd, &def, op);
+                                                           map_fd, def, op);
                                break;
                        case BPF_MAP_KEY_RANGES:
                                err = foreach_key_array_ranges(func, arg, name,
-                                                              map_fd, &def,
+                                                              map_fd, def,
                                                               op);
                                break;
                        default:
@@ -1410,7 +1449,7 @@ apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
 
 static int
 apply_obj_config_map_for_key(const char *name, int map_fd,
-                            struct bpf_map_def *pdef __maybe_unused,
+                            const struct bpf_map_def *pdef,
                             struct bpf_map_op *op,
                             void *pkey, void *arg __maybe_unused)
 {
@@ -1475,9 +1514,9 @@ int bpf__apply_obj_config(void)
 
 #define bpf__for_each_stdout_map(pos, obj, objtmp)     \
        bpf__for_each_map(pos, obj, objtmp)             \
-               if (bpf_map__get_name(pos) &&           \
+               if (bpf_map__name(pos) &&               \
                        (strcmp("__bpf_stdout__",       \
-                               bpf_map__get_name(pos)) == 0))
+                               bpf_map__name(pos)) == 0))
 
 int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
 {
@@ -1489,10 +1528,9 @@ int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
        bool need_init = false;
 
        bpf__for_each_stdout_map(map, obj, tmp) {
-               struct bpf_map_priv *priv;
+               struct bpf_map_priv *priv = bpf_map__priv(map);
 
-               err = bpf_map__get_private(map, (void **)&priv);
-               if (err)
+               if (IS_ERR(priv))
                        return -BPF_LOADER_ERRNO__INTERNAL;
 
                /*
@@ -1520,10 +1558,9 @@ int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
        }
 
        bpf__for_each_stdout_map(map, obj, tmp) {
-               struct bpf_map_priv *priv;
+               struct bpf_map_priv *priv = bpf_map__priv(map);
 
-               err = bpf_map__get_private(map, (void **)&priv);
-               if (err)
+               if (IS_ERR(priv))
                        return -BPF_LOADER_ERRNO__INTERNAL;
                if (priv)
                        continue;
@@ -1533,7 +1570,7 @@ int bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused)
                        if (!priv)
                                return -ENOMEM;
 
-                       err = bpf_map__set_private(map, priv, bpf_map_priv__clear);
+                       err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
                        if (err) {
                                bpf_map_priv__clear(map, priv);
                                return err;
@@ -1607,7 +1644,7 @@ bpf_loader_strerror(int err, char *buf, size_t size)
                snprintf(buf, size, "Unknown bpf loader error %d", err);
        else
                snprintf(buf, size, "%s",
-                        strerror_r(err, sbuf, sizeof(sbuf)));
+                        str_error_r(err, sbuf, sizeof(sbuf)));
 
        buf[size - 1] = '\0';
        return -1;
@@ -1677,7 +1714,7 @@ int bpf__strerror_load(struct bpf_object *obj,
 {
        bpf__strerror_head(err, buf, size);
        case LIBBPF_ERRNO__KVER: {
-               unsigned int obj_kver = bpf_object__get_kversion(obj);
+               unsigned int obj_kver = bpf_object__kversion(obj);
                unsigned int real_kver;
 
                if (fetch_kernel_version(&real_kver, NULL, 0)) {
index 941e17275aa7d747dfeef655c21a364e627f6f95..f2b737b225f2e0233d9d73aa9944588bf1f4d64e 100644 (file)
@@ -46,7 +46,7 @@ struct bpf_object;
 struct parse_events_term;
 #define PERF_BPF_PROBE_GROUP "perf_bpf_probe"
 
-typedef int (*bpf_prog_iter_callback_t)(struct probe_trace_event *tev,
+typedef int (*bpf_prog_iter_callback_t)(const char *group, const char *event,
                                        int fd, void *arg);
 
 #ifdef HAVE_LIBBPF_SUPPORT
@@ -67,8 +67,8 @@ int bpf__strerror_probe(struct bpf_object *obj, int err,
 int bpf__load(struct bpf_object *obj);
 int bpf__strerror_load(struct bpf_object *obj, int err,
                       char *buf, size_t size);
-int bpf__foreach_tev(struct bpf_object *obj,
-                    bpf_prog_iter_callback_t func, void *arg);
+int bpf__foreach_event(struct bpf_object *obj,
+                      bpf_prog_iter_callback_t func, void *arg);
 
 int bpf__config_obj(struct bpf_object *obj, struct parse_events_term *term,
                    struct perf_evlist *evlist, int *error_pos);
@@ -107,9 +107,9 @@ static inline int bpf__unprobe(struct bpf_object *obj __maybe_unused) { return 0
 static inline int bpf__load(struct bpf_object *obj __maybe_unused) { return 0; }
 
 static inline int
-bpf__foreach_tev(struct bpf_object *obj __maybe_unused,
-                bpf_prog_iter_callback_t func __maybe_unused,
-                void *arg __maybe_unused)
+bpf__foreach_event(struct bpf_object *obj __maybe_unused,
+                  bpf_prog_iter_callback_t func __maybe_unused,
+                  void *arg __maybe_unused)
 {
        return 0;
 }
index 67e5966503b21099688d3f90cf903b2d58d196df..5651f3c12f93f396602b2fa46a4c0a52fa17cf2e 100644 (file)
@@ -17,6 +17,7 @@
 #include "tool.h"
 #include "header.h"
 #include "vdso.h"
+#include "probe-file.h"
 
 
 static bool no_buildid_cache;
@@ -144,7 +145,28 @@ static int asnprintf(char **strp, size_t size, const char *fmt, ...)
        return ret;
 }
 
-static char *build_id__filename(const char *sbuild_id, char *bf, size_t size)
+char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
+                                   size_t size)
+{
+       bool retry_old = true;
+
+       snprintf(bf, size, "%s/%s/%s/kallsyms",
+                buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
+retry:
+       if (!access(bf, F_OK))
+               return bf;
+       if (retry_old) {
+               /* Try old style kallsyms cache */
+               snprintf(bf, size, "%s/%s/%s",
+                        buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
+               retry_old = false;
+               goto retry;
+       }
+
+       return NULL;
+}
+
+char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size)
 {
        char *tmp = bf;
        int ret = asnprintf(&bf, size, "%s/.build-id/%.2s/%s", buildid_dir,
@@ -154,23 +176,107 @@ static char *build_id__filename(const char *sbuild_id, char *bf, size_t size)
        return bf;
 }
 
+char *build_id_cache__origname(const char *sbuild_id)
+{
+       char *linkname;
+       char buf[PATH_MAX];
+       char *ret = NULL, *p;
+       size_t offs = 5;        /* == strlen("../..") */
+
+       linkname = build_id_cache__linkname(sbuild_id, NULL, 0);
+       if (!linkname)
+               return NULL;
+
+       if (readlink(linkname, buf, PATH_MAX) < 0)
+               goto out;
+       /* The link should be "../..<origpath>/<sbuild_id>" */
+       p = strrchr(buf, '/');  /* Cut off the "/<sbuild_id>" */
+       if (p && (p > buf + offs)) {
+               *p = '\0';
+               if (buf[offs + 1] == '[')
+                       offs++; /*
+                                * This is a DSO name, like [kernel.kallsyms].
+                                * Skip the first '/', since this is not the
+                                * cache of a regular file.
+                                */
+               ret = strdup(buf + offs);       /* Skip "../..[/]" */
+       }
+out:
+       free(linkname);
+       return ret;
+}
+
+/* Check if the given build_id cache is valid on current running system */
+static bool build_id_cache__valid_id(char *sbuild_id)
+{
+       char real_sbuild_id[SBUILD_ID_SIZE] = "";
+       char *pathname;
+       int ret = 0;
+       bool result = false;
+
+       pathname = build_id_cache__origname(sbuild_id);
+       if (!pathname)
+               return false;
+
+       if (!strcmp(pathname, DSO__NAME_KALLSYMS))
+               ret = sysfs__sprintf_build_id("/", real_sbuild_id);
+       else if (pathname[0] == '/')
+               ret = filename__sprintf_build_id(pathname, real_sbuild_id);
+       else
+               ret = -EINVAL;  /* Should we support other special DSO cache? */
+       if (ret >= 0)
+               result = (strcmp(sbuild_id, real_sbuild_id) == 0);
+       free(pathname);
+
+       return result;
+}
+
+static const char *build_id_cache__basename(bool is_kallsyms, bool is_vdso)
+{
+       return is_kallsyms ? "kallsyms" : (is_vdso ? "vdso" : "elf");
+}
+
 char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size)
 {
-       char build_id_hex[SBUILD_ID_SIZE];
+       bool is_kallsyms = dso__is_kallsyms((struct dso *)dso);
+       bool is_vdso = dso__is_vdso((struct dso *)dso);
+       char sbuild_id[SBUILD_ID_SIZE];
+       char *linkname;
+       bool alloc = (bf == NULL);
+       int ret;
 
        if (!dso->has_build_id)
                return NULL;
 
-       build_id__sprintf(dso->build_id, sizeof(dso->build_id), build_id_hex);
-       return build_id__filename(build_id_hex, bf, size);
+       build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
+       linkname = build_id_cache__linkname(sbuild_id, NULL, 0);
+       if (!linkname)
+               return NULL;
+
+       /* Check if old style build_id cache */
+       if (is_regular_file(linkname))
+               ret = asnprintf(&bf, size, "%s", linkname);
+       else
+               ret = asnprintf(&bf, size, "%s/%s", linkname,
+                        build_id_cache__basename(is_kallsyms, is_vdso));
+       if (ret < 0 || (!alloc && size < (unsigned int)ret))
+               bf = NULL;
+       free(linkname);
+
+       return bf;
 }
 
 bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
 {
-       char *id_name, *ch;
+       char *id_name = NULL, *ch;
        struct stat sb;
+       char sbuild_id[SBUILD_ID_SIZE];
 
-       id_name = dso__build_id_filename(dso, bf, size);
+       if (!dso->has_build_id)
+               goto err;
+
+       build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
+       id_name = build_id_cache__linkname(sbuild_id, NULL, 0);
        if (!id_name)
                goto err;
        if (access(id_name, F_OK))
@@ -194,18 +300,14 @@ bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size)
        if (ch - 3 < bf)
                goto err;
 
+       free(id_name);
        return strncmp(".ko", ch - 3, 3) == 0;
 err:
-       /*
-        * If dso__build_id_filename work, get id_name again,
-        * because id_name points to bf and is broken.
-        */
-       if (id_name)
-               id_name = dso__build_id_filename(dso, bf, size);
        pr_err("Invalid build id: %s\n", id_name ? :
                                         dso->long_name ? :
                                         dso->short_name ? :
                                         "[unknown]");
+       free(id_name);
        return false;
 }
 
@@ -340,8 +442,132 @@ void disable_buildid_cache(void)
        no_buildid_cache = true;
 }
 
-static char *build_id_cache__dirname_from_path(const char *name,
-                                              bool is_kallsyms, bool is_vdso)
+static bool lsdir_bid_head_filter(const char *name __maybe_unused,
+                                 struct dirent *d __maybe_unused)
+{
+       return (strlen(d->d_name) == 2) &&
+               isxdigit(d->d_name[0]) && isxdigit(d->d_name[1]);
+}
+
+static bool lsdir_bid_tail_filter(const char *name __maybe_unused,
+                                 struct dirent *d __maybe_unused)
+{
+       int i = 0;
+       while (isxdigit(d->d_name[i]) && i < SBUILD_ID_SIZE - 3)
+               i++;
+       return (i == SBUILD_ID_SIZE - 3) && (d->d_name[i] == '\0');
+}
+
+struct strlist *build_id_cache__list_all(bool validonly)
+{
+       struct strlist *toplist, *linklist = NULL, *bidlist;
+       struct str_node *nd, *nd2;
+       char *topdir, *linkdir = NULL;
+       char sbuild_id[SBUILD_ID_SIZE];
+
+       /* for filename__ functions */
+       if (validonly)
+               symbol__init(NULL);
+
+       /* Open the top-level directory */
+       if (asprintf(&topdir, "%s/.build-id/", buildid_dir) < 0)
+               return NULL;
+
+       bidlist = strlist__new(NULL, NULL);
+       if (!bidlist)
+               goto out;
+
+       toplist = lsdir(topdir, lsdir_bid_head_filter);
+       if (!toplist) {
+               pr_debug("Error in lsdir(%s): %d\n", topdir, errno);
+               /* If there is no buildid cache, return an empty list */
+               if (errno == ENOENT)
+                       goto out;
+               goto err_out;
+       }
+
+       strlist__for_each_entry(nd, toplist) {
+               if (asprintf(&linkdir, "%s/%s", topdir, nd->s) < 0)
+                       goto err_out;
+               /* Open the lower-level directory */
+               linklist = lsdir(linkdir, lsdir_bid_tail_filter);
+               if (!linklist) {
+                       pr_debug("Error in lsdir(%s): %d\n", linkdir, errno);
+                       goto err_out;
+               }
+               strlist__for_each_entry(nd2, linklist) {
+                       if (snprintf(sbuild_id, SBUILD_ID_SIZE, "%s%s",
+                                    nd->s, nd2->s) != SBUILD_ID_SIZE - 1)
+                               goto err_out;
+                       if (validonly && !build_id_cache__valid_id(sbuild_id))
+                               continue;
+                       if (strlist__add(bidlist, sbuild_id) < 0)
+                               goto err_out;
+               }
+               strlist__delete(linklist);
+               zfree(&linkdir);
+       }
+
+out_free:
+       strlist__delete(toplist);
+out:
+       free(topdir);
+
+       return bidlist;
+
+err_out:
+       strlist__delete(linklist);
+       zfree(&linkdir);
+       strlist__delete(bidlist);
+       bidlist = NULL;
+       goto out_free;
+}
+
+static bool str_is_build_id(const char *maybe_sbuild_id, size_t len)
+{
+       size_t i;
+
+       for (i = 0; i < len; i++) {
+               if (!isxdigit(maybe_sbuild_id[i]))
+                       return false;
+       }
+       return true;
+}
+
+/* Return the valid complete build-id */
+char *build_id_cache__complement(const char *incomplete_sbuild_id)
+{
+       struct strlist *bidlist;
+       struct str_node *nd, *cand = NULL;
+       char *sbuild_id = NULL;
+       size_t len = strlen(incomplete_sbuild_id);
+
+       if (len >= SBUILD_ID_SIZE ||
+           !str_is_build_id(incomplete_sbuild_id, len))
+               return NULL;
+
+       bidlist = build_id_cache__list_all(true);
+       if (!bidlist)
+               return NULL;
+
+       strlist__for_each_entry(nd, bidlist) {
+               if (strncmp(nd->s, incomplete_sbuild_id, len) != 0)
+                       continue;
+               if (cand) {     /* Error: There are more than 2 candidates. */
+                       cand = NULL;
+                       break;
+               }
+               cand = nd;
+       }
+       if (cand)
+               sbuild_id = strdup(cand->s);
+       strlist__delete(bidlist);
+
+       return sbuild_id;
+}
+
+char *build_id_cache__cachedir(const char *sbuild_id, const char *name,
+                              bool is_kallsyms, bool is_vdso)
 {
        char *realname = (char *)name, *filename;
        bool slash = is_kallsyms || is_vdso;
@@ -352,8 +578,9 @@ static char *build_id_cache__dirname_from_path(const char *name,
                        return NULL;
        }
 
-       if (asprintf(&filename, "%s%s%s", buildid_dir, slash ? "/" : "",
-                    is_vdso ? DSO__NAME_VDSO : realname) < 0)
+       if (asprintf(&filename, "%s%s%s%s%s", buildid_dir, slash ? "/" : "",
+                    is_vdso ? DSO__NAME_VDSO : realname,
+                    sbuild_id ? "/" : "", sbuild_id ?: "") < 0)
                filename = NULL;
 
        if (!slash)
@@ -368,7 +595,7 @@ int build_id_cache__list_build_ids(const char *pathname,
        char *dir_name;
        int ret = 0;
 
-       dir_name = build_id_cache__dirname_from_path(pathname, false, false);
+       dir_name = build_id_cache__cachedir(NULL, pathname, false, false);
        if (!dir_name)
                return -ENOMEM;
 
@@ -380,12 +607,36 @@ int build_id_cache__list_build_ids(const char *pathname,
        return ret;
 }
 
+#if defined(HAVE_LIBELF_SUPPORT) && defined(HAVE_GELF_GETNOTE_SUPPORT)
+static int build_id_cache__add_sdt_cache(const char *sbuild_id,
+                                         const char *realname)
+{
+       struct probe_cache *cache;
+       int ret;
+
+       cache = probe_cache__new(sbuild_id);
+       if (!cache)
+               return -1;
+
+       ret = probe_cache__scan_sdt(cache, realname);
+       if (ret >= 0) {
+               pr_debug("Found %d SDTs in %s\n", ret, realname);
+               if (probe_cache__commit(cache) < 0)
+                       ret = -1;
+       }
+       probe_cache__delete(cache);
+       return ret;
+}
+#else
+#define build_id_cache__add_sdt_cache(sbuild_id, realname) (0)
+#endif
+
 int build_id_cache__add_s(const char *sbuild_id, const char *name,
                          bool is_kallsyms, bool is_vdso)
 {
        const size_t size = PATH_MAX;
        char *realname = NULL, *filename = NULL, *dir_name = NULL,
-            *linkname = zalloc(size), *targetname, *tmp;
+            *linkname = zalloc(size), *tmp;
        int err = -1;
 
        if (!is_kallsyms) {
@@ -394,14 +645,22 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name,
                        goto out_free;
        }
 
-       dir_name = build_id_cache__dirname_from_path(name, is_kallsyms, is_vdso);
+       dir_name = build_id_cache__cachedir(sbuild_id, name,
+                                           is_kallsyms, is_vdso);
        if (!dir_name)
                goto out_free;
 
+       /* Remove old style build-id cache */
+       if (is_regular_file(dir_name))
+               if (unlink(dir_name))
+                       goto out_free;
+
        if (mkdir_p(dir_name, 0755))
                goto out_free;
 
-       if (asprintf(&filename, "%s/%s", dir_name, sbuild_id) < 0) {
+       /* Save the allocated buildid dirname */
+       if (asprintf(&filename, "%s/%s", dir_name,
+                    build_id_cache__basename(is_kallsyms, is_vdso)) < 0) {
                filename = NULL;
                goto out_free;
        }
@@ -415,7 +674,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name,
                        goto out_free;
        }
 
-       if (!build_id__filename(sbuild_id, linkname, size))
+       if (!build_id_cache__linkname(sbuild_id, linkname, size))
                goto out_free;
        tmp = strrchr(linkname, '/');
        *tmp = '\0';
@@ -424,11 +683,16 @@ int build_id_cache__add_s(const char *sbuild_id, const char *name,
                goto out_free;
 
        *tmp = '/';
-       targetname = filename + strlen(buildid_dir) - 5;
-       memcpy(targetname, "../..", 5);
+       tmp = dir_name + strlen(buildid_dir) - 5;
+       memcpy(tmp, "../..", 5);
 
-       if (symlink(targetname, linkname) == 0)
+       if (symlink(tmp, linkname) == 0)
                err = 0;
+
+       /* Update SDT cache : error is just warned */
+       if (build_id_cache__add_sdt_cache(sbuild_id, realname) < 0)
+               pr_debug("Failed to update/scan SDT cache for %s\n", realname);
+
 out_free:
        if (!is_kallsyms)
                free(realname);
@@ -452,7 +716,7 @@ static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
 bool build_id_cache__cached(const char *sbuild_id)
 {
        bool ret = false;
-       char *filename = build_id__filename(sbuild_id, NULL, 0);
+       char *filename = build_id_cache__linkname(sbuild_id, NULL, 0);
 
        if (filename && !access(filename, F_OK))
                ret = true;
@@ -471,7 +735,7 @@ int build_id_cache__remove_s(const char *sbuild_id)
        if (filename == NULL || linkname == NULL)
                goto out_free;
 
-       if (!build_id__filename(sbuild_id, linkname, size))
+       if (!build_id_cache__linkname(sbuild_id, linkname, size))
                goto out_free;
 
        if (access(linkname, F_OK))
@@ -489,7 +753,7 @@ int build_id_cache__remove_s(const char *sbuild_id)
        tmp = strrchr(linkname, '/') + 1;
        snprintf(tmp, size - (tmp - linkname), "%s", filename);
 
-       if (unlink(linkname))
+       if (rm_rf(linkname))
                goto out_free;
 
        err = 0;
@@ -501,7 +765,7 @@ out_free:
 
 static int dso__cache_build_id(struct dso *dso, struct machine *machine)
 {
-       bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
+       bool is_kallsyms = dso__is_kallsyms(dso);
        bool is_vdso = dso__is_vdso(dso);
        const char *name = dso->long_name;
        char nm[PATH_MAX];
index 64af3e20610d7718ecacfad3810fc4bff4ee882d..d27990610f9f4bfff8598bd71855f4b1f14f1a8c 100644 (file)
@@ -14,6 +14,8 @@ struct dso;
 int build_id__sprintf(const u8 *build_id, int len, char *bf);
 int sysfs__sprintf_build_id(const char *root_dir, char *sbuild_id);
 int filename__sprintf_build_id(const char *pathname, char *sbuild_id);
+char *build_id_cache__kallsyms_path(const char *sbuild_id, char *bf,
+                                   size_t size);
 
 char *dso__build_id_filename(const struct dso *dso, char *bf, size_t size);
 bool dso__build_id_is_kmod(const struct dso *dso, char *bf, size_t size);
@@ -28,6 +30,12 @@ bool perf_session__read_build_ids(struct perf_session *session, bool with_hits);
 int perf_session__write_buildid_table(struct perf_session *session, int fd);
 int perf_session__cache_build_ids(struct perf_session *session);
 
+char *build_id_cache__origname(const char *sbuild_id);
+char *build_id_cache__linkname(const char *sbuild_id, char *bf, size_t size);
+char *build_id_cache__cachedir(const char *sbuild_id, const char *name,
+                              bool is_kallsyms, bool is_vdso);
+struct strlist *build_id_cache__list_all(bool validonly);
+char *build_id_cache__complement(const char *incomplete_sbuild_id);
 int build_id_cache__list_build_ids(const char *pathname,
                                   struct strlist **result);
 bool build_id_cache__cached(const char *sbuild_id);
index 0d814bb746617965895c6fb7e1f3a58b5a0ce9b9..512c0c83fbc6a9a9ed2a9c996de74ff6fb45eb62 100644 (file)
@@ -1,40 +1,20 @@
 #ifndef __PERF_CACHE_H
 #define __PERF_CACHE_H
 
-#include <stdbool.h>
-#include "util.h"
 #include "strbuf.h"
 #include <subcmd/pager.h>
-#include "../perf.h"
 #include "../ui/ui.h"
 
 #include <linux/string.h>
 
 #define CMD_EXEC_PATH "--exec-path"
-#define CMD_PERF_DIR "--perf-dir="
-#define CMD_WORK_TREE "--work-tree="
 #define CMD_DEBUGFS_DIR "--debugfs-dir="
 
-#define PERF_DIR_ENVIRONMENT "PERF_DIR"
-#define PERF_WORK_TREE_ENVIRONMENT "PERF_WORK_TREE"
 #define EXEC_PATH_ENVIRONMENT "PERF_EXEC_PATH"
-#define DEFAULT_PERF_DIR_ENVIRONMENT ".perf"
 #define PERF_DEBUGFS_ENVIRONMENT "PERF_DEBUGFS_DIR"
 #define PERF_TRACEFS_ENVIRONMENT "PERF_TRACEFS_DIR"
 #define PERF_PAGER_ENVIRONMENT "PERF_PAGER"
 
-extern const char *config_exclusive_filename;
-
-typedef int (*config_fn_t)(const char *, const char *, void *);
-int perf_default_config(const char *, const char *, void *);
-int perf_config(config_fn_t fn, void *);
-int perf_config_int(const char *, const char *);
-u64 perf_config_u64(const char *, const char *);
-int perf_config_bool(const char *, const char *);
-int config_error_nonbool(const char *);
-const char *perf_config_dirname(const char *, const char *);
-const char *perf_etc_perfconfig(void);
-
 char *alias_lookup(const char *alias);
 int split_cmdline(char *cmdline, const char ***argv);
 
@@ -45,9 +25,6 @@ static inline int is_absolute_path(const char *path)
        return path[0] == '/';
 }
 
-char *strip_path_suffix(const char *path, const char *suffix);
-
 char *mkpath(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
-char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2)));
 
 #endif /* __PERF_CACHE_H */
index 65e2a4f7cb4e810711fa549cb09977b686c08ee2..13e75549c4407dcf750c52e123ce3d48cc57ed2c 100644 (file)
@@ -94,6 +94,7 @@ struct callchain_param {
        enum perf_call_graph_mode record_mode;
        u32                     dump_size;
        enum chain_mode         mode;
+       u16                     max_stack;
        u32                     print_limit;
        double                  min_percent;
        sort_chain_func_t       sort;
@@ -105,6 +106,7 @@ struct callchain_param {
 };
 
 extern struct callchain_param callchain_param;
+extern struct callchain_param callchain_param_default;
 
 struct callchain_list {
        u64                     ip;
index 90aa1b46b2e5be06578befd275b6104bc9522dd3..8fdee24725a7f59febb37eeb85b3a22d8dd1f82b 100644 (file)
@@ -81,7 +81,7 @@ static int add_cgroup(struct perf_evlist *evlist, char *str)
        /*
         * check if cgrp is already defined, if so we reuse it
         */
-       evlist__for_each(evlist, counter) {
+       evlist__for_each_entry(evlist, counter) {
                cgrp = counter->cgrp;
                if (!cgrp)
                        continue;
@@ -110,7 +110,7 @@ static int add_cgroup(struct perf_evlist *evlist, char *str)
         * if add cgroup N, then need to find event N
         */
        n = 0;
-       evlist__for_each(evlist, counter) {
+       evlist__for_each_entry(evlist, counter) {
                if (n == nr_cgroups)
                        goto found;
                n++;
index 2babddaa24813102c0c9d7525ec8ab615b621775..f0dcd0ee0afaa0505470ca382a9d597a5e88fb3a 100644 (file)
@@ -4,18 +4,24 @@
 #include "cloexec.h"
 #include "asm/bug.h"
 #include "debug.h"
+#include <unistd.h>
+#include <asm/unistd.h>
+#include <sys/syscall.h>
 
 static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
 
-#ifdef __GLIBC_PREREQ
-#if !__GLIBC_PREREQ(2, 6)
 int __weak sched_getcpu(void)
 {
+#ifdef __NR_getcpu
+       unsigned cpu;
+       int err = syscall(__NR_getcpu, &cpu, NULL, NULL);
+       if (!err)
+               return cpu;
+#else
        errno = ENOSYS;
+#endif
        return -1;
 }
-#endif
-#endif
 
 static int perf_flag_probe(void)
 {
@@ -58,7 +64,7 @@ static int perf_flag_probe(void)
 
        WARN_ONCE(err != EINVAL && err != EBUSY,
                  "perf_event_open(..., PERF_FLAG_FD_CLOEXEC) failed with unexpected error %d (%s)\n",
-                 err, strerror_r(err, sbuf, sizeof(sbuf)));
+                 err, str_error_r(err, sbuf, sizeof(sbuf)));
 
        /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
        while (1) {
@@ -76,7 +82,7 @@ static int perf_flag_probe(void)
 
        if (WARN_ONCE(fd < 0 && err != EBUSY,
                      "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
-                     err, strerror_r(err, sbuf, sizeof(sbuf))))
+                     err, str_error_r(err, sbuf, sizeof(sbuf))))
                return -1;
 
        return 0;
index 43e84aa27e4a6489eafdebc3636d8bfb33334e3f..dbbf89b050a5dd641c384be5f1ceb5adba1b6f13 100644 (file)
@@ -1,7 +1,11 @@
 #include <linux/kernel.h>
 #include "cache.h"
+#include "config.h"
+#include <stdlib.h>
+#include <stdio.h>
 #include "color.h"
 #include <math.h>
+#include <unistd.h>
 
 int perf_use_color_default = -1;
 
index dad7d827216816b275112f1cb776396467071663..18dae745034f760566b03cdfa4e7ec6aa08dcdba 100644 (file)
@@ -26,6 +26,7 @@ static FILE *config_file;
 static const char *config_file_name;
 static int config_linenr;
 static int config_file_eof;
+static struct perf_config_set *config_set;
 
 const char *config_exclusive_filename;
 
@@ -275,7 +276,8 @@ static int perf_parse_file(config_fn_t fn, void *data)
                        break;
                }
        }
-       die("bad config file line %d in %s", config_linenr, config_file_name);
+       pr_err("bad config file line %d in %s\n", config_linenr, config_file_name);
+       return -1;
 }
 
 static int parse_unit_factor(const char *end, unsigned long *val)
@@ -371,7 +373,7 @@ int perf_config_bool(const char *name, const char *value)
        return !!perf_config_bool_or_int(name, value, &discard);
 }
 
-const char *perf_config_dirname(const char *name, const char *value)
+static const char *perf_config_dirname(const char *name, const char *value)
 {
        if (!name)
                return NULL;
@@ -477,54 +479,6 @@ static int perf_config_global(void)
        return !perf_env_bool("PERF_CONFIG_NOGLOBAL", 0);
 }
 
-int perf_config(config_fn_t fn, void *data)
-{
-       int ret = 0, found = 0;
-       const char *home = NULL;
-
-       /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
-       if (config_exclusive_filename)
-               return perf_config_from_file(fn, config_exclusive_filename, data);
-       if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) {
-               ret += perf_config_from_file(fn, perf_etc_perfconfig(),
-                                           data);
-               found += 1;
-       }
-
-       home = getenv("HOME");
-       if (perf_config_global() && home) {
-               char *user_config = strdup(mkpath("%s/.perfconfig", home));
-               struct stat st;
-
-               if (user_config == NULL) {
-                       warning("Not enough memory to process %s/.perfconfig, "
-                               "ignoring it.", home);
-                       goto out;
-               }
-
-               if (stat(user_config, &st) < 0)
-                       goto out_free;
-
-               if (st.st_uid && (st.st_uid != geteuid())) {
-                       warning("File %s not owned by current user or root, "
-                               "ignoring it.", user_config);
-                       goto out_free;
-               }
-
-               if (!st.st_size)
-                       goto out_free;
-
-               ret += perf_config_from_file(fn, user_config, data);
-               found += 1;
-out_free:
-               free(user_config);
-       }
-out:
-       if (found == 0)
-               return -1;
-       return ret;
-}
-
 static struct perf_config_section *find_section(struct list_head *sections,
                                                const char *section_name)
 {
@@ -609,8 +563,12 @@ static int collect_config(const char *var, const char *value,
        struct perf_config_section *section = NULL;
        struct perf_config_item *item = NULL;
        struct perf_config_set *set = perf_config_set;
-       struct list_head *sections = &set->sections;
+       struct list_head *sections;
+
+       if (set == NULL)
+               return -1;
 
+       sections = &set->sections;
        key = ptr = strdup(var);
        if (!key) {
                pr_debug("%s: strdup failed\n", __func__);
@@ -641,22 +599,115 @@ static int collect_config(const char *var, const char *value,
 
 out_free:
        free(key);
-       perf_config_set__delete(set);
        return -1;
 }
 
+/*
+ * Fill @set with key/value pairs collected (via collect_config) from the
+ * perf config sources, in precedence order: the $PERF_CONFIG exclusive
+ * file, else the system config followed by $HOME/.perfconfig.
+ * Returns 0 on success, -1 on failure.
+ */
+static int perf_config_set__init(struct perf_config_set *set)
+{
+       int ret = -1;
+       const char *home = NULL;
+
+       /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
+       if (config_exclusive_filename)
+               return perf_config_from_file(collect_config, config_exclusive_filename, set);
+       if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK)) {
+               if (perf_config_from_file(collect_config, perf_etc_perfconfig(), set) < 0)
+                       goto out;
+       }
+
+       home = getenv("HOME");
+       if (perf_config_global() && home) {
+               char *user_config = strdup(mkpath("%s/.perfconfig", home));
+               struct stat st;
+
+               if (user_config == NULL) {
+                       warning("Not enough memory to process %s/.perfconfig, "
+                               "ignoring it.", home);
+                       goto out;
+               }
+
+               if (stat(user_config, &st) < 0)
+                       goto out_free;
+
+               /* Only trust a user config owned by the current user or root. */
+               if (st.st_uid && (st.st_uid != geteuid())) {
+                       warning("File %s not owned by current user or root, "
+                               "ignoring it.", user_config);
+                       goto out_free;
+               }
+
+               /* An empty file has nothing to parse; skip it. */
+               if (!st.st_size)
+                       goto out_free;
+
+               ret = perf_config_from_file(collect_config, user_config, set);
+
+out_free:
+               free(user_config);
+       }
+out:
+       /*
+        * NOTE(review): if only the system config was read successfully and
+        * no usable ~/.perfconfig exists, ret is still -1 here — confirm
+        * that callers intend that to count as failure.
+        */
+       return ret;
+}
+
+/*
+ * Allocate a config set and populate it from the config files; returns
+ * NULL on allocation failure or when initialization fails (the partially
+ * built set is freed before returning).
+ */
 struct perf_config_set *perf_config_set__new(void)
 {
        struct perf_config_set *set = zalloc(sizeof(*set));
 
        if (set) {
                INIT_LIST_HEAD(&set->sections);
-               perf_config(collect_config, set);
+               if (perf_config_set__init(set) < 0) {
+                       perf_config_set__delete(set);
+                       set = NULL;
+               }
        }
 
        return set;
 }
 
+/*
+ * Invoke @fn for every "section.item" = value pair cached in the global
+ * config_set, stopping at the first callback failure. Returns the last
+ * callback result, or -1 if the config set was never initialized.
+ */
+int perf_config(config_fn_t fn, void *data)
+{
+       int ret = 0;
+       char key[BUFSIZ];
+       struct perf_config_section *section;
+       struct perf_config_item *item;
+
+       if (config_set == NULL)
+               return -1;
+
+       perf_config_set__for_each_entry(config_set, section, item) {
+               char *value = item->value;
+
+               if (value) {
+                       /* Keys are presented to callbacks as "section.name". */
+                       scnprintf(key, sizeof(key), "%s.%s",
+                                 section->name, item->name);
+                       ret = fn(key, value, data);
+                       if (ret < 0) {
+                               pr_err("Error: wrong config key-value pair %s=%s\n",
+                                      key, value);
+                               break;
+                       }
+               }
+       }
+
+       return ret;
+}
+
+/* Lazily build the global config set; harmless if already initialized. */
+void perf_config__init(void)
+{
+       if (config_set == NULL)
+               config_set = perf_config_set__new();
+}
+
+/* Free the global config set and mark it uninitialized. */
+void perf_config__exit(void)
+{
+       perf_config_set__delete(config_set);
+       config_set = NULL;
+}
+
+/* Re-read the config files by tearing down and rebuilding the set. */
+void perf_config__refresh(void)
+{
+       perf_config__exit();
+       perf_config__init();
+}
+
 static void perf_config_item__delete(struct perf_config_item *item)
 {
        zfree(&item->name);
@@ -693,6 +744,9 @@ static void perf_config_set__purge(struct perf_config_set *set)
 
+/* Free all sections/items of @set and the set itself; NULL is a no-op. */
 void perf_config_set__delete(struct perf_config_set *set)
 {
+       if (set == NULL)
+               return;
+
        perf_config_set__purge(set);
        free(set);
 }
index 22ec626ac718851f600a1c4b9a42c1ab00de1d66..6f813d46045e10072ca693fb12038bf40b752abc 100644 (file)
@@ -20,7 +20,47 @@ struct perf_config_set {
        struct list_head sections;
 };
 
+/* Set from $PERF_CONFIG; when non-NULL it is the only config file read. */
+extern const char *config_exclusive_filename;
+
+/* Callback signature for config iteration: (key, value, caller data). */
+typedef int (*config_fn_t)(const char *, const char *, void *);
+int perf_default_config(const char *, const char *, void *);
+int perf_config(config_fn_t fn, void *);
+int perf_config_int(const char *, const char *);
+u64 perf_config_u64(const char *, const char *);
+int perf_config_bool(const char *, const char *);
+int config_error_nonbool(const char *);
+const char *perf_etc_perfconfig(void);
+
 struct perf_config_set *perf_config_set__new(void);
 void perf_config_set__delete(struct perf_config_set *set);
+/* Lifetime management of the global, lazily-built config set. */
+void perf_config__init(void);
+void perf_config__exit(void);
+void perf_config__refresh(void);
+
+/**
+ * perf_config_sections__for_each - iterate thru all the sections
+ * @list: list_head instance to iterate
+ * @section: struct perf_config_section iterator
+ */
+#define perf_config_sections__for_each_entry(list, section)    \
+        list_for_each_entry(section, list, node)
+
+/**
+ * perf_config_items__for_each - iterate thru all the items
+ * @list: list_head instance to iterate
+ * @item: struct perf_config_item iterator
+ */
+#define perf_config_items__for_each_entry(list, item)  \
+        list_for_each_entry(item, list, node)
+
+/**
+ * perf_config_set__for_each - iterate thru all the config section-item pairs
+ * @set: evlist instance to iterate
+ * @section: struct perf_config_section iterator
+ * @item: struct perf_config_item iterator
+ */
+#define perf_config_set__for_each_entry(set, section, item)                    \
+       perf_config_sections__for_each_entry(&set->sections, section)           \
+       perf_config_items__for_each_entry(&section->items, item)
 
 #endif /* __PERF_CONFIG_H */
index 02d801670f30053fa1f6344f7a944bc8e2047842..2c0b52264a468103e9bbd32b8631d76fae585ff4 100644 (file)
@@ -236,13 +236,12 @@ struct cpu_map *cpu_map__new_data(struct cpu_map_data *data)
 
+/*
+ * Print the cpu map as a compressed list (e.g. "0-3,8") followed by a
+ * newline; formatting is delegated to cpu_map__snprint().
+ */
 size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp)
 {
-       int i;
-       size_t printed = fprintf(fp, "%d cpu%s: ",
-                                map->nr, map->nr > 1 ? "s" : "");
-       for (i = 0; i < map->nr; ++i)
-               printed += fprintf(fp, "%s%d", i ? ", " : "", map->map[i]);
+#define BUFSIZE 1024
+       char buf[BUFSIZE];
 
-       return printed + fprintf(fp, "\n");
+       cpu_map__snprint(map, buf, sizeof(buf));
+       return fprintf(fp, "%s\n", buf);
+#undef BUFSIZE
 }
 
 struct cpu_map *cpu_map__dummy_new(void)
@@ -589,13 +588,66 @@ int cpu__setup_cpunode_map(void)
 }
 
+/* True if @cpu appears anywhere in the map. */
 bool cpu_map__has(struct cpu_map *cpus, int cpu)
+{
+       return cpu_map__idx(cpus, cpu) != -1;
+}
+
+/* Linear search: return the index of @cpu in the map, or -1 if absent. */
+int cpu_map__idx(struct cpu_map *cpus, int cpu)
 {
        int i;
 
        for (i = 0; i < cpus->nr; ++i) {
                if (cpus->map[i] == cpu)
-                       return true;
+                       return i;
+       }
+
+       return -1;
+}
+
+/* Inverse of cpu_map__idx(); @idx is not bounds-checked here. */
+int cpu_map__cpu(struct cpu_map *cpus, int idx)
+{
+       return cpus->map[idx];
+}
+
+/*
+ * Format the cpu map into @buf as a comma-separated list with consecutive
+ * runs collapsed to ranges, e.g. {0,1,2,4} -> "0-2,4". Returns the number
+ * of characters written (per snprintf semantics).
+ */
+size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size)
+{
+       int i, cpu, start = -1;
+       bool first = true;
+       size_t ret = 0;
+
+#define COMMA first ? "" : ","
+
+       /* One extra iteration (i == map->nr) flushes the final run. */
+       for (i = 0; i < map->nr + 1; i++) {
+               bool last = i == map->nr;
+
+               /* INT_MAX sentinel can never extend a consecutive run. */
+               cpu = last ? INT_MAX : map->map[i];
+
+               if (start == -1) {
+                       start = i;
+                       if (last) {
+                               ret += snprintf(buf + ret, size - ret,
+                                               "%s%d", COMMA,
+                                               map->map[i]);
+                       }
+               } else if (((i - start) != (cpu - map->map[start])) || last) {
+                       /* Run broken: emit [start, i-1] as "N" or "N-M". */
+                       int end = i - 1;
+
+                       if (start == end) {
+                               ret += snprintf(buf + ret, size - ret,
+                                               "%s%d", COMMA,
+                                               map->map[start]);
+                       } else {
+                               ret += snprintf(buf + ret, size - ret,
+                                               "%s%d-%d", COMMA,
+                                               map->map[start], map->map[end]);
+                       }
+                       first = false;
+                       start = i;
+               }
        }
 
-       return false;
+#undef COMMA
+
+       pr_debug("cpumask list: %s\n", buf);
+       return ret;
 }
index 1a0a35073ce1e8b52ce7a4bc07efdcadefbb45a3..06bd689f598972fa4e57add5487ad62ac685a22f 100644 (file)
@@ -19,6 +19,7 @@ struct cpu_map *cpu_map__empty_new(int nr);
 struct cpu_map *cpu_map__dummy_new(void);
 struct cpu_map *cpu_map__new_data(struct cpu_map_data *data);
 struct cpu_map *cpu_map__read(FILE *file);
+size_t cpu_map__snprint(struct cpu_map *map, char *buf, size_t size);
 size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp);
 int cpu_map__get_socket_id(int cpu);
 int cpu_map__get_socket(struct cpu_map *map, int idx, void *data);
@@ -67,5 +68,7 @@ int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
                       int (*f)(struct cpu_map *map, int cpu, void *data),
                       void *data);
 
+int cpu_map__cpu(struct cpu_map *cpus, int idx);
 bool cpu_map__has(struct cpu_map *cpus, int cpu);
+int cpu_map__idx(struct cpu_map *cpus, int cpu);
 #endif /* __PERF_CPUMAP_H */
index 9f53020c32697e0aeb52de6ab8fa8d24a0ac24fa..4f979bb27b6ce535c1a1c9c19c1683ed52fcbf1c 100644 (file)
@@ -26,6 +26,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "machine.h"
+#include "config.h"
 
 #define pr_N(n, fmt, ...) \
        eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)
@@ -68,6 +69,9 @@ struct ctf_writer {
                };
                struct bt_ctf_field_type *array[6];
        } data;
+       struct bt_ctf_event_class       *comm_class;
+       struct bt_ctf_event_class       *exit_class;
+       struct bt_ctf_event_class       *fork_class;
 };
 
 struct convert {
@@ -76,6 +80,7 @@ struct convert {
 
        u64                     events_size;
        u64                     events_count;
+       u64                     non_sample_count;
 
        /* Ordered events configured queue size. */
        u64                     queue_size;
@@ -140,6 +145,36 @@ FUNC_VALUE_SET(s64)
 FUNC_VALUE_SET(u64)
 __FUNC_VALUE_SET(u64_hex, u64)
 
+static int string_set_value(struct bt_ctf_field *field, const char *string);
+/*
+ * Set a string payload field on a CTF event: create a field of the
+ * writer's string type, fill it, and attach it under @name. Returns 0
+ * on success, non-zero on any babeltrace failure.
+ */
+static __maybe_unused int
+value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
+                const char *name, const char *string)
+{
+       struct bt_ctf_field_type *type = cw->data.string;
+       struct bt_ctf_field *field;
+       int ret = 0;
+
+       field = bt_ctf_field_create(type);
+       if (!field) {
+               pr_err("failed to create a field %s\n", name);
+               return -1;
+       }
+
+       ret = string_set_value(field, string);
+       if (ret) {
+               pr_err("failed to set value %s\n", name);
+               goto err_put_field;
+       }
+
+       ret = bt_ctf_event_set_payload(event, name, field);
+       if (ret)
+               pr_err("failed to set payload %s\n", name);
+
+err_put_field:
+       /* Drop our reference; the event holds its own on success. */
+       bt_ctf_field_put(field);
+       return ret;
+}
+
 static struct bt_ctf_field_type*
 get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
 {
@@ -731,6 +766,72 @@ static int process_sample_event(struct perf_tool *tool,
        return cs ? 0 : -1;
 }
 
+/*
+ * Copy one member of the raw perf event (_event->_name._field) into the
+ * CTF event payload using the matching value_set_<type> helper; expects
+ * 'cw', 'event', '_event' and 'ret' in the expanding scope.
+ */
+#define __NON_SAMPLE_SET_FIELD(_name, _type, _field)   \
+do {                                                   \
+       ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
+       if (ret)                                        \
+               return -1;                              \
+} while(0)
+
+/*
+ * Generate process_<_name>_event(): convert one non-sample perf event
+ * (comm/fork/exit) into a CTF event on stream 0 using the event class
+ * registered in cw-><_name>_class, then chain to the stock
+ * perf_event__process_<_name> handler. 'body' is a sequence of
+ * __NON_SAMPLE_SET_FIELD() invocations filling the payload.
+ */
+#define __FUNC_PROCESS_NON_SAMPLE(_name, body)         \
+static int process_##_name##_event(struct perf_tool *tool,     \
+                                  union perf_event *_event,    \
+                                  struct perf_sample *sample,  \
+                                  struct machine *machine)     \
+{                                                              \
+       struct convert *c = container_of(tool, struct convert, tool);\
+       struct ctf_writer *cw = &c->writer;                     \
+       struct bt_ctf_event_class *event_class = cw->_name##_class;\
+       struct bt_ctf_event *event;                             \
+       struct ctf_stream *cs;                                  \
+       int ret;                                                \
+                                                               \
+       c->non_sample_count++;                                  \
+       c->events_size += _event->header.size;                  \
+       event = bt_ctf_event_create(event_class);               \
+       if (!event) {                                           \
+               pr_err("Failed to create an CTF event\n");      \
+               return -1;                                      \
+       }                                                       \
+                                                               \
+       bt_ctf_clock_set_time(cw->clock, sample->time);         \
+       body                                                    \
+       cs = ctf_stream(cw, 0);                                 \
+       if (cs) {                                               \
+               if (is_flush_needed(cs))                        \
+                       ctf_stream__flush(cs);                  \
+                                                               \
+               cs->count++;                                    \
+               bt_ctf_stream_append_event(cs->stream, event);  \
+       }                                                       \
+       bt_ctf_event_put(event);                                \
+                                                               \
+       return perf_event__process_##_name(tool, _event, sample, machine);\
+}
+
+/* Converters for the three supported non-sample record types. */
+__FUNC_PROCESS_NON_SAMPLE(comm,
+       __NON_SAMPLE_SET_FIELD(comm, u32, pid);
+       __NON_SAMPLE_SET_FIELD(comm, u32, tid);
+       __NON_SAMPLE_SET_FIELD(comm, string, comm);
+)
+__FUNC_PROCESS_NON_SAMPLE(fork,
+       __NON_SAMPLE_SET_FIELD(fork, u32, pid);
+       __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
+       __NON_SAMPLE_SET_FIELD(fork, u32, tid);
+       __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
+       __NON_SAMPLE_SET_FIELD(fork, u64, time);
+)
+
+/*
+ * NOTE(review): the exit converter reads _event->fork.* — presumably
+ * because PERF_RECORD_EXIT reuses the fork record layout in
+ * union perf_event; confirm against the perf_event definition.
+ */
+__FUNC_PROCESS_NON_SAMPLE(exit,
+       __NON_SAMPLE_SET_FIELD(fork, u32, pid);
+       __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
+       __NON_SAMPLE_SET_FIELD(fork, u32, tid);
+       __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
+       __NON_SAMPLE_SET_FIELD(fork, u64, time);
+)
+#undef __NON_SAMPLE_SET_FIELD
+#undef __FUNC_PROCESS_NON_SAMPLE
+
 /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
 static char *change_name(char *name, char *orig_name, int dup)
 {
@@ -997,7 +1098,7 @@ static int setup_events(struct ctf_writer *cw, struct perf_session *session)
        struct perf_evsel *evsel;
        int ret;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                ret = add_event(cw, evsel);
                if (ret)
                        return ret;
@@ -1005,12 +1106,86 @@ static int setup_events(struct ctf_writer *cw, struct perf_session *session)
        return 0;
 }
 
+/*
+ * Add field 'n' of writer data type 't' (cw->data.t) to the event class
+ * being built; expects 'event_class' and 'cw' in the expanding scope.
+ */
+#define __NON_SAMPLE_ADD_FIELD(t, n)                                           \
+       do {                                                    \
+               pr2("  field '%s'\n", #n);                      \
+               if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
+                       pr_err("Failed to add field '%s';\n", #n);\
+                       return -1;                              \
+               }                                               \
+       } while(0)
+
+/*
+ * Generate add_<_name>_event(): create the "perf_<_name>" CTF event
+ * class, populate its fields via 'body' (__NON_SAMPLE_ADD_FIELD calls),
+ * register it on the stream class and cache it in cw-><_name>_class.
+ */
+#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body)                 \
+static int add_##_name##_event(struct ctf_writer *cw)          \
+{                                                              \
+       struct bt_ctf_event_class *event_class;                 \
+       int ret;                                                \
+                                                               \
+       pr("Adding "#_name" event\n");                          \
+       event_class = bt_ctf_event_class_create("perf_" #_name);\
+       if (!event_class)                                       \
+               return -1;                                      \
+       body                                                    \
+                                                               \
+       ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
+       if (ret) {                                              \
+               pr("Failed to add event class '"#_name"' into stream.\n");\
+               return ret;                                     \
+       }                                                       \
+                                                               \
+       cw->_name##_class = event_class;                        \
+       bt_ctf_event_class_put(event_class);                    \
+       return 0;                                               \
+}
+
+/* Event-class definitions mirroring the converters above. */
+__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
+       __NON_SAMPLE_ADD_FIELD(u32, pid);
+       __NON_SAMPLE_ADD_FIELD(u32, tid);
+       __NON_SAMPLE_ADD_FIELD(string, comm);
+)
+
+__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
+       __NON_SAMPLE_ADD_FIELD(u32, pid);
+       __NON_SAMPLE_ADD_FIELD(u32, ppid);
+       __NON_SAMPLE_ADD_FIELD(u32, tid);
+       __NON_SAMPLE_ADD_FIELD(u32, ptid);
+       __NON_SAMPLE_ADD_FIELD(u64, time);
+)
+
+__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
+       __NON_SAMPLE_ADD_FIELD(u32, pid);
+       __NON_SAMPLE_ADD_FIELD(u32, ppid);
+       __NON_SAMPLE_ADD_FIELD(u32, tid);
+       __NON_SAMPLE_ADD_FIELD(u32, ptid);
+       __NON_SAMPLE_ADD_FIELD(u64, time);
+)
+
+#undef __NON_SAMPLE_ADD_FIELD
+#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
+
+/*
+ * Register the CTF event classes for all supported non-sample record
+ * types; returns the first failing add_*_event() result, else 0.
+ */
+static int setup_non_sample_events(struct ctf_writer *cw,
+                                  struct perf_session *session __maybe_unused)
+{
+       int ret;
+
+       ret = add_comm_event(cw);
+       if (ret)
+               return ret;
+       ret = add_exit_event(cw);
+       if (ret)
+               return ret;
+       ret = add_fork_event(cw);
+       if (ret)
+               return ret;
+       return 0;
+}
+
 static void cleanup_events(struct perf_session *session)
 {
        struct perf_evlist *evlist = session->evlist;
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                struct evsel_priv *priv;
 
                priv = evsel->priv;
@@ -1273,13 +1448,14 @@ static int convert__config(const char *var, const char *value, void *cb)
        return 0;
 }
 
-int bt_convert__perf2ctf(const char *input, const char *path, bool force)
+int bt_convert__perf2ctf(const char *input, const char *path,
+                        struct perf_data_convert_opts *opts)
 {
        struct perf_session *session;
        struct perf_data_file file = {
                .path = input,
                .mode = PERF_DATA_MODE_READ,
-               .force = force,
+               .force = opts->force,
        };
        struct convert c = {
                .tool = {
@@ -1299,6 +1475,12 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
        struct ctf_writer *cw = &c.writer;
        int err = -1;
 
+       if (opts->all) {
+               c.tool.comm = process_comm_event;
+               c.tool.exit = process_exit_event;
+               c.tool.fork = process_fork_event;
+       }
+
        perf_config(convert__config, &c);
 
        /* CTF writer */
@@ -1323,6 +1505,9 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
        if (setup_events(cw, session))
                goto free_session;
 
+       if (opts->all && setup_non_sample_events(cw, session))
+               goto free_session;
+
        if (setup_streams(cw, session))
                goto free_session;
 
@@ -1337,10 +1522,15 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
                file.path, path);
 
        fprintf(stderr,
-               "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
+               "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
                (double) c.events_size / 1024.0 / 1024.0,
                c.events_count);
 
+       if (!c.non_sample_count)
+               fprintf(stderr, ") ]\n");
+       else
+               fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);
+
        cleanup_events(session);
        perf_session__delete(session);
        ctf_writer__cleanup(cw);
index 4c204342a9d838aa32dce4c69688bdc8b8f85bbc..9a3b587f76c196af463030349a0373b98c3d07f3 100644 (file)
@@ -1,8 +1,10 @@
 #ifndef __DATA_CONVERT_BT_H
 #define __DATA_CONVERT_BT_H
+#include "data-convert.h"
 #ifdef HAVE_LIBBABELTRACE_SUPPORT
 
-int bt_convert__perf2ctf(const char *input_name, const char *to_ctf, bool force);
+int bt_convert__perf2ctf(const char *input_name, const char *to_ctf,
+                        struct perf_data_convert_opts *opts);
 
 #endif /* HAVE_LIBBABELTRACE_SUPPORT */
 #endif /* __DATA_CONVERT_BT_H */
diff --git a/tools/perf/util/data-convert.h b/tools/perf/util/data-convert.h
new file mode 100644 (file)
index 0000000..5314962
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef __DATA_CONVERT_H
+#define __DATA_CONVERT_H
+
+/* Options controlling perf.data -> CTF conversion. */
+struct perf_data_convert_opts {
+       bool force;     /* open the input file even if ownership checks fail */
+       bool all;       /* also convert non-sample events (comm/fork/exit) */
+};
+
+#endif /* __DATA_CONVERT_H */
index be83516155ee5d7914d699b48df33441b7f6f345..60bfc9ca1e22f3a1b10d58169d2ed70ccfe040cc 100644 (file)
@@ -57,7 +57,7 @@ static int open_file_read(struct perf_data_file *file)
                int err = errno;
 
                pr_err("failed to open %s: %s", file->path,
-                       strerror_r(err, sbuf, sizeof(sbuf)));
+                       str_error_r(err, sbuf, sizeof(sbuf)));
                if (err == ENOENT && !strcmp(file->path, "perf.data"))
                        pr_err("  (try 'perf record' first)");
                pr_err("\n");
@@ -99,7 +99,7 @@ static int open_file_write(struct perf_data_file *file)
 
        if (fd < 0)
                pr_err("failed to open %s : %s\n", file->path,
-                       strerror_r(errno, sbuf, sizeof(sbuf)));
+                       str_error_r(errno, sbuf, sizeof(sbuf)));
 
        return fd;
 }
index c9a6dc173e74eb77f56673c6a609a5a92c788498..b0c2b5c5d3375fc919f24f92a9e4b7eba11c084d 100644 (file)
@@ -233,17 +233,6 @@ int db_export__symbol(struct db_export *dbe, struct symbol *sym,
        return 0;
 }
 
-static struct thread *get_main_thread(struct machine *machine, struct thread *thread)
-{
-       if (thread->pid_ == thread->tid)
-               return thread__get(thread);
-
-       if (thread->pid_ == -1)
-               return NULL;
-
-       return machine__find_thread(machine, thread->pid_, thread->pid_);
-}
-
 static int db_ids_from_al(struct db_export *dbe, struct addr_location *al,
                          u64 *dso_db_id, u64 *sym_db_id, u64 *offset)
 {
@@ -382,7 +371,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
        if (err)
                return err;
 
-       main_thread = get_main_thread(al->machine, thread);
+       main_thread = thread__main_thread(al->machine, thread);
        if (main_thread)
                comm = machine__thread_exec_comm(al->machine, main_thread);
 
index 14bafda79edaeba1bbd3e007b5faa8607d0570bf..d242adc3d5a2ac7217879a37d0f5ed26b344351a 100644 (file)
@@ -38,7 +38,7 @@ extern int debug_data_convert;
 #define pr_oe_time(t, fmt, ...)  pr_time_N(1, debug_ordered_events, t, pr_fmt(fmt), ##__VA_ARGS__)
 #define pr_oe_time2(t, fmt, ...) pr_time_N(2, debug_ordered_events, t, pr_fmt(fmt), ##__VA_ARGS__)
 
-#define STRERR_BUFSIZE 128     /* For the buffer size of strerror_r */
+#define STRERR_BUFSIZE 128     /* For the buffer size of str_error_r */
 
 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(union perf_event *event);
diff --git a/tools/perf/util/demangle-rust.c b/tools/perf/util/demangle-rust.c
new file mode 100644 (file)
index 0000000..f9dafa8
--- /dev/null
@@ -0,0 +1,269 @@
+#include <string.h>
+#include "util.h"
+#include "debug.h"
+
+#include "demangle-rust.h"
+
+/*
+ * Mangled Rust symbols look like this:
+ *
+ *     _$LT$std..sys..fd..FileDesc$u20$as$u20$core..ops..Drop$GT$::drop::hc68340e1baa4987a
+ *
+ * The original symbol is:
+ *
+ *     <std::sys::fd::FileDesc as core::ops::Drop>::drop
+ *
+ * The last component of the path is a 64-bit hash in lowercase hex, prefixed
+ * with "h". Rust does not have a global namespace between crates, an illusion
+ * which Rust maintains by using the hash to distinguish things that would
+ * otherwise have the same symbol.
+ *
+ * Any path component not starting with a XID_Start character is prefixed with
+ * "_".
+ *
+ * The following escape sequences are used:
+ *
+ *     ","  =>  $C$
+ *     "@"  =>  $SP$
+ *     "*"  =>  $BP$
+ *     "&"  =>  $RF$
+ *     "<"  =>  $LT$
+ *     ">"  =>  $GT$
+ *     "("  =>  $LP$
+ *     ")"  =>  $RP$
+ *     " "  =>  $u20$
+ *     "'"  =>  $u27$
+ *     "["  =>  $u5b$
+ *     "]"  =>  $u5d$
+ *     "~"  =>  $u7e$
+ *
+ * A double ".." means "::" and a single "." means "-".
+ *
+ * The only characters allowed in the mangled symbol are a-zA-Z0-9 and _.:$
+ */
+
+static const char *hash_prefix = "::h";
+static const size_t hash_prefix_len = 3;
+static const size_t hash_len = 16;
+
+static bool is_prefixed_hash(const char *start);
+static bool looks_like_rust(const char *sym, size_t len);
+static bool unescape(const char **in, char **out, const char *seq, char value);
+
+/*
+ * INPUT:
+ *     sym: symbol that has been through BFD-demangling
+ *
+ * This function looks for the following indicators:
+ *
+ *  1. The hash must consist of "h" followed by 16 lowercase hex digits.
+ *
+ *  2. As a sanity check, the hash must use between 5 and 15 of the 16 possible
+ *     hex digits. This is true of 99.9998% of hashes so once in your life you
+ *     may see a false negative. The point is to notice path components that
+ *     could be Rust hashes but are probably not, like "haaaaaaaaaaaaaaaa". In
+ *     this case a false positive (non-Rust symbol has an important path
+ *     component removed because it looks like a Rust hash) is worse than a
+ *     false negative (the rare Rust symbol is not demangled) so this sets the
+ *     balance in favor of false negatives.
+ *
+ *  3. There must be no characters other than a-zA-Z0-9 and _.:$
+ *
+ *  4. There must be no unrecognized $-sign sequences.
+ *
+ *  5. There must be no sequence of three or more dots in a row ("...").
+ */
+/* Return true if @sym looks like a legacy-mangled Rust symbol. */
+bool
+rust_is_mangled(const char *sym)
+{
+       size_t len, len_without_hash;
+
+       if (!sym)
+               return false;
+
+       len = strlen(sym);
+       if (len <= hash_prefix_len + hash_len)
+               /* Not long enough to contain "::h" + hash + something else */
+               return false;
+
+       /* The "::h<16 hex>" hash must sit at the very end of the symbol. */
+       len_without_hash = len - (hash_prefix_len + hash_len);
+       if (!is_prefixed_hash(sym + len_without_hash))
+               return false;
+
+       return looks_like_rust(sym, len_without_hash);
+}
+
+/*
+ * A hash is the prefix "::h" followed by 16 lowercase hex digits. The hex
+ * digits must comprise between 5 and 15 (inclusive) distinct digits.
+ */
+/*
+ * A hash is the prefix "::h" followed by 16 lowercase hex digits. The hex
+ * digits must comprise between 5 and 15 (inclusive) distinct digits.
+ */
+static bool is_prefixed_hash(const char *str)
+{
+       const char *end;
+       bool seen[16];          /* one slot per hex digit value */
+       size_t i;
+       int count;
+
+       if (strncmp(str, hash_prefix, hash_prefix_len))
+               return false;
+       str += hash_prefix_len;
+
+       memset(seen, false, sizeof(seen));
+       for (end = str + hash_len; str < end; str++)
+               if (*str >= '0' && *str <= '9')
+                       seen[*str - '0'] = true;
+               else if (*str >= 'a' && *str <= 'f')
+                       seen[*str - 'a' + 10] = true;
+               else
+                       return false;   /* uppercase or non-hex: not a hash */
+
+       /* Count how many distinct digits seen */
+       count = 0;
+       for (i = 0; i < 16; i++)
+               if (seen[i])
+                       count++;
+
+       return count >= 5 && count <= 15;
+}
+
+/*
+ * Scan the first @len bytes of @str (the symbol minus its hash) and
+ * verify every character/escape sequence is legal in a mangled Rust
+ * path: [a-zA-Z0-9_.:], "$...$" escapes, and at most two dots in a row.
+ */
+static bool looks_like_rust(const char *str, size_t len)
+{
+       const char *end = str + len;
+
+       while (str < end)
+               switch (*str) {
+               case '$':
+                       /* Recognize only the fixed set of mangler escapes. */
+                       if (!strncmp(str, "$C$", 3))
+                               str += 3;
+                       else if (!strncmp(str, "$SP$", 4)
+                                       || !strncmp(str, "$BP$", 4)
+                                       || !strncmp(str, "$RF$", 4)
+                                       || !strncmp(str, "$LT$", 4)
+                                       || !strncmp(str, "$GT$", 4)
+                                       || !strncmp(str, "$LP$", 4)
+                                       || !strncmp(str, "$RP$", 4))
+                               str += 4;
+                       else if (!strncmp(str, "$u20$", 5)
+                                       || !strncmp(str, "$u27$", 5)
+                                       || !strncmp(str, "$u5b$", 5)
+                                       || !strncmp(str, "$u5d$", 5)
+                                       || !strncmp(str, "$u7e$", 5))
+                               str += 5;
+                       else
+                               return false;
+                       break;
+               case '.':
+                       /* Do not allow three or more consecutive dots */
+                       if (!strncmp(str, "...", 3))
+                               return false;
+                       /* Fall through */
+               case 'a' ... 'z':
+               case 'A' ... 'Z':
+               case '0' ... '9':
+               case '_':
+               case ':':
+                       str++;
+                       break;
+               default:
+                       return false;
+               }
+
+       return true;
+}
+
+/*
+ * INPUT:
+ *     sym: symbol for which rust_is_mangled(sym) returns true
+ *
+ * The input is demangled in-place because the mangled name is always longer
+ * than the demangled one.
+ */
+/*
+ * Demangle @sym in place: expand "$...$" escapes, turn ".." into "::"
+ * and "." into "-", drop mangler-inserted underscores, and cut off the
+ * trailing "::h<hash>". Safe in place because output never exceeds input.
+ */
+void
+rust_demangle_sym(char *sym)
+{
+       const char *in;
+       char *out;
+       const char *end;
+
+       if (!sym)
+               return;
+
+       in = sym;
+       out = sym;
+       /* Stop before the hash suffix so it is truncated away. */
+       end = sym + strlen(sym) - (hash_prefix_len + hash_len);
+
+       while (in < end)
+               switch (*in) {
+               case '$':
+                       if (!(unescape(&in, &out, "$C$", ',')
+                                       || unescape(&in, &out, "$SP$", '@')
+                                       || unescape(&in, &out, "$BP$", '*')
+                                       || unescape(&in, &out, "$RF$", '&')
+                                       || unescape(&in, &out, "$LT$", '<')
+                                       || unescape(&in, &out, "$GT$", '>')
+                                       || unescape(&in, &out, "$LP$", '(')
+                                       || unescape(&in, &out, "$RP$", ')')
+                                       || unescape(&in, &out, "$u20$", ' ')
+                                       || unescape(&in, &out, "$u27$", '\'')
+                                       || unescape(&in, &out, "$u5b$", '[')
+                                       || unescape(&in, &out, "$u5d$", ']')
+                                       || unescape(&in, &out, "$u7e$", '~'))) {
+                               pr_err("demangle-rust: unexpected escape sequence");
+                               goto done;
+                       }
+                       break;
+               case '_':
+                       /*
+                        * If this is the start of a path component and the next
+                        * character is an escape sequence, ignore the
+                        * underscore. The mangler inserts an underscore to make
+                        * sure the path component begins with a XID_Start
+                        * character.
+                        */
+                       if ((in == sym || in[-1] == ':') && in[1] == '$')
+                               in++;
+                       else
+                               *out++ = *in++;
+                       break;
+               case '.':
+                       if (in[1] == '.') {
+                               /* ".." becomes "::" */
+                               *out++ = ':';
+                               *out++ = ':';
+                               in += 2;
+                       } else {
+                               /* "." becomes "-" */
+                               *out++ = '-';
+                               in++;
+                       }
+                       break;
+               case 'a' ... 'z':
+               case 'A' ... 'Z':
+               case '0' ... '9':
+               case ':':
+                       *out++ = *in++;
+                       break;
+               default:
+                       pr_err("demangle-rust: unexpected character '%c' in symbol\n",
+                               *in);
+                       goto done;
+               }
+
+done:
+       /* Terminate at the write cursor, even on early bail-out. */
+       *out = '\0';
+}
+
+/*
+ * If *in begins with @seq, write @value to *out and advance both
+ * cursors (in by strlen(seq), out by one). Returns whether it matched.
+ */
+static bool unescape(const char **in, char **out, const char *seq, char value)
+{
+       size_t len = strlen(seq);
+
+       if (strncmp(*in, seq, len))
+               return false;
+
+       **out = value;
+
+       *in += len;
+       *out += 1;
+
+       return true;
+}
diff --git a/tools/perf/util/demangle-rust.h b/tools/perf/util/demangle-rust.h
new file mode 100644 (file)
index 0000000..7b41ead
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef __PERF_DEMANGLE_RUST
+#define __PERF_DEMANGLE_RUST 1
+
+bool rust_is_mangled(const char *str);
+void rust_demangle_sym(char *str);
+
+#endif /* __PERF_DEMANGLE_RUST */
index 5d286f5d7906798a6ffe1e5e91b8a52bf4bc1973..774f6ec884d50cf0d2d3696689e0ca8c4e8e27c5 100644 (file)
@@ -335,7 +335,7 @@ static int do_open(char *name)
                        return fd;
 
                pr_debug("dso open failed: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
+                        str_error_r(errno, sbuf, sizeof(sbuf)));
                if (!dso__data_open_cnt || errno != EMFILE)
                        break;
 
@@ -442,17 +442,27 @@ static rlim_t get_fd_limit(void)
        return limit;
 }
 
-static bool may_cache_fd(void)
+static rlim_t fd_limit;
+
+/*
+ * Used only by tests/dso-data.c to reset the environment
+ * for tests. I don't expect we should change this during
+ * standard runtime.
+ */
+void reset_fd_limit(void)
 {
-       static rlim_t limit;
+       fd_limit = 0;
+}
 
-       if (!limit)
-               limit = get_fd_limit();
+static bool may_cache_fd(void)
+{
+       if (!fd_limit)
+               fd_limit = get_fd_limit();
 
-       if (limit == RLIM_INFINITY)
+       if (fd_limit == RLIM_INFINITY)
                return true;
 
-       return limit > (rlim_t) dso__data_open_cnt;
+       return fd_limit > (rlim_t) dso__data_open_cnt;
 }
 
 /*
@@ -776,7 +786,7 @@ static int data_file_size(struct dso *dso, struct machine *machine)
        if (fstat(dso->data.fd, &st) < 0) {
                ret = -errno;
                pr_err("dso cache fstat failed: %s\n",
-                      strerror_r(errno, sbuf, sizeof(sbuf)));
+                      str_error_r(errno, sbuf, sizeof(sbuf)));
                dso->data.status = DSO_DATA_STATUS_ERROR;
                goto out;
        }
@@ -1356,7 +1366,7 @@ int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
        BUG_ON(buflen == 0);
 
        if (errnum >= 0) {
-               const char *err = strerror_r(errnum, buf, buflen);
+               const char *err = str_error_r(errnum, buf, buflen);
 
                if (err != buf)
                        scnprintf(buf, buflen, "%s", err);
index 0953280629cffde7373b4689c8cdf5d16e992fb5..ecc4bbd3f82e3e172a89480d04466ba95eab7b09 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/atomic.h>
 #include <linux/types.h>
 #include <linux/rbtree.h>
+#include <sys/types.h>
 #include <stdbool.h>
 #include <pthread.h>
 #include <linux/types.h>
@@ -349,10 +350,17 @@ static inline bool dso__is_kcore(struct dso *dso)
               dso->binary_type == DSO_BINARY_TYPE__GUEST_KCORE;
 }
 
+static inline bool dso__is_kallsyms(struct dso *dso)
+{
+       return dso->kernel && dso->long_name[0] != '/';
+}
+
 void dso__free_a2l(struct dso *dso);
 
 enum dso_type dso__type(struct dso *dso, struct machine *machine);
 
 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen);
 
+void reset_fd_limit(void);
+
 #endif /* __PERF_DSO */
index 49a11d9d8b8f050efa48715cfba9e54731f63eef..bb964e86b09de18bb1eafe3765e5fccb1e87e448 100644 (file)
@@ -18,10 +18,13 @@ void perf_env__exit(struct perf_env *env)
        zfree(&env->cmdline_argv);
        zfree(&env->sibling_cores);
        zfree(&env->sibling_threads);
-       zfree(&env->numa_nodes);
        zfree(&env->pmu_mappings);
        zfree(&env->cpu);
 
+       for (i = 0; i < env->nr_numa_nodes; i++)
+               cpu_map__put(env->numa_nodes[i].map);
+       zfree(&env->numa_nodes);
+
        for (i = 0; i < env->caches_cnt; i++)
                cpu_cache_level__free(&env->caches[i]);
        zfree(&env->caches);
index 56cffb60a0b42e456a11fb2bd489d74a69893b0c..b164dfd2dcbf591c5749d78146d2ff0b8493e5e1 100644 (file)
@@ -2,6 +2,7 @@
 #define __PERF_ENV_H
 
 #include <linux/types.h>
+#include "cpumap.h"
 
 struct cpu_topology_map {
        int     socket_id;
@@ -18,6 +19,13 @@ struct cpu_cache_level {
        char    *map;
 };
 
+struct numa_node {
+       u32              node;
+       u64              mem_total;
+       u64              mem_free;
+       struct cpu_map  *map;
+};
+
 struct perf_env {
        char                    *hostname;
        char                    *os_release;
@@ -40,11 +48,11 @@ struct perf_env {
        const char              **cmdline_argv;
        char                    *sibling_cores;
        char                    *sibling_threads;
-       char                    *numa_nodes;
        char                    *pmu_mappings;
        struct cpu_topology_map *cpu;
        struct cpu_cache_level  *caches;
        int                      caches_cnt;
+       struct numa_node        *numa_nodes;
 };
 
 extern struct perf_env perf_env;
index 9b141f12329edc750de5eefd47c3c7f2cb46e503..e20438b784bed4019e50080e54fd06fad2c0776e 100644 (file)
@@ -1092,7 +1092,7 @@ size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
        struct cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
        size_t ret;
 
-       ret = fprintf(fp, " nr: ");
+       ret = fprintf(fp, ": ");
 
        if (cpus)
                ret += cpu_map__fprintf(cpus, fp);
index 8d363d5e65a2e14c019fd18a6129ceef7b1538c3..b32464b353aa4d6c704c7fc5c5584c4003756aec 100644 (file)
@@ -8,6 +8,7 @@
 #include "map.h"
 #include "build-id.h"
 #include "perf_regs.h"
+#include <asm/perf_regs.h>
 
 struct mmap_event {
        struct perf_event_header header;
index e82ba90cc96997b03042c79c180e2de34bec231f..2a40b8e1def70655277ac0458ad0becc1bbd8877 100644 (file)
@@ -15,6 +15,7 @@
 #include "evlist.h"
 #include "evsel.h"
 #include "debug.h"
+#include "asm/bug.h"
 #include <unistd.h>
 
 #include "parse-events.h"
@@ -27,8 +28,8 @@
 #include <linux/log2.h>
 #include <linux/err.h>
 
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
-static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
+static void perf_mmap__munmap(struct perf_mmap *map);
+static void perf_mmap__put(struct perf_mmap *map);
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
@@ -44,7 +45,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
        perf_evlist__set_maps(evlist, cpus, threads);
        fdarray__init(&evlist->pollfd, 64);
        evlist->workload.pid = -1;
-       evlist->backward = false;
+       evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
 }
 
 struct perf_evlist *perf_evlist__new(void)
@@ -100,7 +101,7 @@ static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel)
+       evlist__for_each_entry(evlist, evsel)
                perf_evsel__calc_id_pos(evsel);
 
        perf_evlist__set_id_pos(evlist);
@@ -110,7 +111,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 {
        struct perf_evsel *pos, *n;
 
-       evlist__for_each_safe(evlist, n, pos) {
+       evlist__for_each_entry_safe(evlist, n, pos) {
                list_del_init(&pos->node);
                pos->evlist = NULL;
                perf_evsel__delete(pos);
@@ -122,11 +123,15 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
        zfree(&evlist->mmap);
+       zfree(&evlist->backward_mmap);
        fdarray__exit(&evlist->pollfd);
 }
 
 void perf_evlist__delete(struct perf_evlist *evlist)
 {
+       if (evlist == NULL)
+               return;
+
        perf_evlist__munmap(evlist);
        perf_evlist__close(evlist);
        cpu_map__put(evlist->cpus);
@@ -161,7 +166,7 @@ static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel)
+       evlist__for_each_entry(evlist, evsel)
                __perf_evlist__propagate_maps(evlist, evsel);
 }
 
@@ -190,7 +195,7 @@ void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
 {
        struct perf_evsel *evsel, *temp;
 
-       __evlist__for_each_safe(list, temp, evsel) {
+       __evlist__for_each_entry_safe(list, temp, evsel) {
                list_del_init(&evsel->node);
                perf_evlist__add(evlist, evsel);
        }
@@ -205,7 +210,7 @@ void __perf_evlist__set_leader(struct list_head *list)
 
        leader->nr_members = evsel->idx - leader->idx + 1;
 
-       __evlist__for_each(list, evsel) {
+       __evlist__for_each_entry(list, evsel) {
                evsel->leader = leader;
        }
 }
@@ -296,7 +301,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist,
        return 0;
 
 out_delete_partial_list:
-       __evlist__for_each_safe(&head, n, evsel)
+       __evlist__for_each_entry_safe(&head, n, evsel)
                perf_evsel__delete(evsel);
        return -1;
 }
@@ -317,7 +322,7 @@ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type   == PERF_TYPE_TRACEPOINT &&
                    (int)evsel->attr.config == id)
                        return evsel;
@@ -332,7 +337,7 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
                    (strcmp(evsel->name, name) == 0))
                        return evsel;
@@ -367,7 +372,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
 {
        struct perf_evsel *pos;
 
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                        continue;
                perf_evsel__disable(pos);
@@ -380,7 +385,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 {
        struct perf_evsel *pos;
 
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                if (!perf_evsel__is_group_leader(pos) || !pos->fd)
                        continue;
                perf_evsel__enable(pos);
@@ -448,7 +453,7 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
        int nfds = 0;
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->system_wide)
                        nfds += nr_cpus;
                else
@@ -462,15 +467,16 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
        return 0;
 }
 
-static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
+static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
+                                    struct perf_mmap *map, short revent)
 {
-       int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
+       int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
        /*
         * Save the idx so that when we filter out fds POLLHUP'ed we can
         * close the associated evlist->mmap[] entry.
         */
        if (pos >= 0) {
-               evlist->pollfd.priv[pos].idx = idx;
+               evlist->pollfd.priv[pos].ptr = map;
 
                fcntl(fd, F_SETFL, O_NONBLOCK);
        }
@@ -480,20 +486,22 @@ static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx
 
 int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
 {
-       return __perf_evlist__add_pollfd(evlist, fd, -1);
+       return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
 }
 
-static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
+static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
+                                        void *arg __maybe_unused)
 {
-       struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
+       struct perf_mmap *map = fda->priv[fd].ptr;
 
-       perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
+       if (map)
+               perf_mmap__put(map);
 }
 
 int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
 {
        return fdarray__filter(&evlist->pollfd, revents_and_mask,
-                              perf_evlist__munmap_filtered);
+                              perf_evlist__munmap_filtered, NULL);
 }
 
 int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
@@ -647,8 +655,8 @@ static int perf_evlist__event2id(struct perf_evlist *evlist,
        return 0;
 }
 
-static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
-                                                  union perf_event *event)
+struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+                                           union perf_event *event)
 {
        struct perf_evsel *first = perf_evlist__first(evlist);
        struct hlist_head *head;
@@ -684,8 +692,11 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
 {
        int i;
 
+       if (!evlist->backward_mmap)
+               return 0;
+
        for (i = 0; i < evlist->nr_mmaps; i++) {
-               int fd = evlist->mmap[i].fd;
+               int fd = evlist->backward_mmap[i].fd;
                int err;
 
                if (fd < 0)
@@ -697,12 +708,12 @@ static int perf_evlist__set_paused(struct perf_evlist *evlist, bool value)
        return 0;
 }
 
-int perf_evlist__pause(struct perf_evlist *evlist)
+static int perf_evlist__pause(struct perf_evlist *evlist)
 {
        return perf_evlist__set_paused(evlist, true);
 }
 
-int perf_evlist__resume(struct perf_evlist *evlist)
+static int perf_evlist__resume(struct perf_evlist *evlist)
 {
        return perf_evlist__set_paused(evlist, false);
 }
@@ -777,9 +788,8 @@ broken_event:
        return event;
 }
 
-union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+union perf_event *perf_mmap__read_forward(struct perf_mmap *md, bool check_messup)
 {
-       struct perf_mmap *md = &evlist->mmap[idx];
        u64 head;
        u64 old = md->prev;
 
@@ -791,13 +801,12 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 
        head = perf_mmap__read_head(md);
 
-       return perf_mmap__read(md, evlist->overwrite, old, head, &md->prev);
+       return perf_mmap__read(md, check_messup, old, head, &md->prev);
 }
 
 union perf_event *
-perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
+perf_mmap__read_backward(struct perf_mmap *md)
 {
-       struct perf_mmap *md = &evlist->mmap[idx];
        u64 head, end;
        u64 start = md->prev;
 
@@ -832,9 +841,38 @@ perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
        return perf_mmap__read(md, false, start, end, &md->prev);
 }
 
-void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
+union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist, int idx)
+{
+       struct perf_mmap *md = &evlist->mmap[idx];
+
+       /*
+        * Check messup is required for forward overwritable ring buffer:
+        * memory pointed by md->prev can be overwritten in this case.
+        * No need for read-write ring buffer: kernel stops outputting when
+        * it hits md->prev (perf_mmap__consume()).
+        */
+       return perf_mmap__read_forward(md, evlist->overwrite);
+}
+
+union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist, int idx)
 {
        struct perf_mmap *md = &evlist->mmap[idx];
+
+       /*
+        * No need to check messup for backward ring buffer:
+        * We can always read arbitrary long data from a backward
+        * ring buffer unless we forget to pause it before reading.
+        */
+       return perf_mmap__read_backward(md);
+}
+
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
+{
+       return perf_evlist__mmap_read_forward(evlist, idx);
+}
+
+void perf_mmap__read_catchup(struct perf_mmap *md)
+{
        u64 head;
 
        if (!atomic_read(&md->refcnt))
@@ -844,36 +882,44 @@ void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
        md->prev = head;
 }
 
+void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx)
+{
+       perf_mmap__read_catchup(&evlist->mmap[idx]);
+}
+
 static bool perf_mmap__empty(struct perf_mmap *md)
 {
        return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
 }
 
-static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
+static void perf_mmap__get(struct perf_mmap *map)
 {
-       atomic_inc(&evlist->mmap[idx].refcnt);
+       atomic_inc(&map->refcnt);
 }
 
-static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
+static void perf_mmap__put(struct perf_mmap *md)
 {
-       BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);
+       BUG_ON(md->base && atomic_read(&md->refcnt) == 0);
 
-       if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
-               __perf_evlist__munmap(evlist, idx);
+       if (atomic_dec_and_test(&md->refcnt))
+               perf_mmap__munmap(md);
 }
 
-void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+void perf_mmap__consume(struct perf_mmap *md, bool overwrite)
 {
-       struct perf_mmap *md = &evlist->mmap[idx];
-
-       if (!evlist->overwrite) {
+       if (!overwrite) {
                u64 old = md->prev;
 
                perf_mmap__write_tail(md, old);
        }
 
        if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
-               perf_evlist__mmap_put(evlist, idx);
+               perf_mmap__put(md);
+}
+
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+       perf_mmap__consume(&evlist->mmap[idx], evlist->overwrite);
 }
 
 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
@@ -904,41 +950,52 @@ void __weak auxtrace_mmap_params__set_idx(
 {
 }
 
-static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+static void perf_mmap__munmap(struct perf_mmap *map)
 {
-       if (evlist->mmap[idx].base != NULL) {
-               munmap(evlist->mmap[idx].base, evlist->mmap_len);
-               evlist->mmap[idx].base = NULL;
-               evlist->mmap[idx].fd = -1;
-               atomic_set(&evlist->mmap[idx].refcnt, 0);
+       if (map->base != NULL) {
+               munmap(map->base, perf_mmap__mmap_len(map));
+               map->base = NULL;
+               map->fd = -1;
+               atomic_set(&map->refcnt, 0);
        }
-       auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
+       auxtrace_mmap__munmap(&map->auxtrace_mmap);
 }
 
-void perf_evlist__munmap(struct perf_evlist *evlist)
+static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 {
        int i;
 
-       if (evlist->mmap == NULL)
-               return;
+       if (evlist->mmap)
+               for (i = 0; i < evlist->nr_mmaps; i++)
+                       perf_mmap__munmap(&evlist->mmap[i]);
 
-       for (i = 0; i < evlist->nr_mmaps; i++)
-               __perf_evlist__munmap(evlist, i);
+       if (evlist->backward_mmap)
+               for (i = 0; i < evlist->nr_mmaps; i++)
+                       perf_mmap__munmap(&evlist->backward_mmap[i]);
+}
 
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+       perf_evlist__munmap_nofree(evlist);
        zfree(&evlist->mmap);
+       zfree(&evlist->backward_mmap);
 }
 
-static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
+static struct perf_mmap *perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
        int i;
+       struct perf_mmap *map;
 
        evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
        if (cpu_map__empty(evlist->cpus))
                evlist->nr_mmaps = thread_map__nr(evlist->threads);
-       evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+       map = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
+       if (!map)
+               return NULL;
+
        for (i = 0; i < evlist->nr_mmaps; i++)
-               evlist->mmap[i].fd = -1;
-       return evlist->mmap != NULL ? 0 : -ENOMEM;
+               map[i].fd = -1;
+       return map;
 }
 
 struct mmap_params {
@@ -947,8 +1004,8 @@ struct mmap_params {
        struct auxtrace_mmap_params auxtrace_mp;
 };
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
-                              struct mmap_params *mp, int fd)
+static int perf_mmap__mmap(struct perf_mmap *map,
+                          struct mmap_params *mp, int fd)
 {
        /*
         * The last one will be done at perf_evlist__mmap_consume(), so that we
@@ -963,35 +1020,61 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
         * evlist layer can't just drop it when filtering events in
         * perf_evlist__filter_pollfd().
         */
-       atomic_set(&evlist->mmap[idx].refcnt, 2);
-       evlist->mmap[idx].prev = 0;
-       evlist->mmap[idx].mask = mp->mask;
-       evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
-                                     MAP_SHARED, fd, 0);
-       if (evlist->mmap[idx].base == MAP_FAILED) {
+       atomic_set(&map->refcnt, 2);
+       map->prev = 0;
+       map->mask = mp->mask;
+       map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
+                        MAP_SHARED, fd, 0);
+       if (map->base == MAP_FAILED) {
                pr_debug2("failed to mmap perf event ring buffer, error %d\n",
                          errno);
-               evlist->mmap[idx].base = NULL;
+               map->base = NULL;
                return -1;
        }
-       evlist->mmap[idx].fd = fd;
+       map->fd = fd;
 
-       if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
-                               &mp->auxtrace_mp, evlist->mmap[idx].base, fd))
+       if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
+                               &mp->auxtrace_mp, map->base, fd))
                return -1;
 
        return 0;
 }
 
+static bool
+perf_evlist__should_poll(struct perf_evlist *evlist __maybe_unused,
+                        struct perf_evsel *evsel)
+{
+       if (evsel->attr.write_backward)
+               return false;
+       return true;
+}
+
 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                                       struct mmap_params *mp, int cpu,
-                                      int thread, int *output)
+                                      int thread, int *_output, int *_output_backward)
 {
        struct perf_evsel *evsel;
+       int revent;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
+               struct perf_mmap *maps = evlist->mmap;
+               int *output = _output;
                int fd;
 
+               if (evsel->attr.write_backward) {
+                       output = _output_backward;
+                       maps = evlist->backward_mmap;
+
+                       if (!maps) {
+                               maps = perf_evlist__alloc_mmap(evlist);
+                               if (!maps)
+                                       return -1;
+                               evlist->backward_mmap = maps;
+                               if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
+                                       perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
+                       }
+               }
+
                if (evsel->system_wide && thread)
                        continue;
 
@@ -999,15 +1082,18 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 
                if (*output == -1) {
                        *output = fd;
-                       if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
+
+                       if (perf_mmap__mmap(&maps[idx], mp, *output)  < 0)
                                return -1;
                } else {
                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
                                return -1;
 
-                       perf_evlist__mmap_get(evlist, idx);
+                       perf_mmap__get(&maps[idx]);
                }
 
+               revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;
+
                /*
                 * The system_wide flag causes a selected event to be opened
                 * always without a pid.  Consequently it will never get a
@@ -1016,8 +1102,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
                 * Therefore don't add it for polling.
                 */
                if (!evsel->system_wide &&
-                   __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
-                       perf_evlist__mmap_put(evlist, idx);
+                   __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
+                       perf_mmap__put(&maps[idx]);
                        return -1;
                }
 
@@ -1043,13 +1129,14 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
        pr_debug2("perf event ring buffer mmapped per cpu\n");
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                int output = -1;
+               int output_backward = -1;
 
                auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
                                              true);
 
                for (thread = 0; thread < nr_threads; thread++) {
                        if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
-                                                       thread, &output))
+                                                       thread, &output, &output_backward))
                                goto out_unmap;
                }
        }
@@ -1057,8 +1144,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
        return 0;
 
 out_unmap:
-       for (cpu = 0; cpu < nr_cpus; cpu++)
-               __perf_evlist__munmap(evlist, cpu);
+       perf_evlist__munmap_nofree(evlist);
        return -1;
 }
 
@@ -1071,20 +1157,20 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
        pr_debug2("perf event ring buffer mmapped per thread\n");
        for (thread = 0; thread < nr_threads; thread++) {
                int output = -1;
+               int output_backward = -1;
 
                auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
                                              false);
 
                if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
-                                               &output))
+                                               &output, &output_backward))
                        goto out_unmap;
        }
 
        return 0;
 
 out_unmap:
-       for (thread = 0; thread < nr_threads; thread++)
-               __perf_evlist__munmap(evlist, thread);
+       perf_evlist__munmap_nofree(evlist);
        return -1;
 }
 
@@ -1217,7 +1303,9 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
                .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
        };
 
-       if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
+       if (!evlist->mmap)
+               evlist->mmap = perf_evlist__alloc_mmap(evlist);
+       if (!evlist->mmap)
                return -ENOMEM;
 
        if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
@@ -1231,7 +1319,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
        auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
                                   auxtrace_pages, auxtrace_overwrite);
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
@@ -1307,7 +1395,7 @@ void __perf_evlist__set_sample_bit(struct perf_evlist *evlist,
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel)
+       evlist__for_each_entry(evlist, evsel)
                __perf_evsel__set_sample_bit(evsel, bit);
 }
 
@@ -1316,7 +1404,7 @@ void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist,
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel)
+       evlist__for_each_entry(evlist, evsel)
                __perf_evsel__reset_sample_bit(evsel, bit);
 }
 
@@ -1327,7 +1415,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **e
        const int ncpus = cpu_map__nr(evlist->cpus),
                  nthreads = thread_map__nr(evlist->threads);
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->filter == NULL)
                        continue;
 
@@ -1350,7 +1438,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
        struct perf_evsel *evsel;
        int err = 0;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
                        continue;
 
@@ -1404,7 +1492,7 @@ bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
        if (evlist->id_pos < 0 || evlist->is_pos < 0)
                return false;
 
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                if (pos->id_pos != evlist->id_pos ||
                    pos->is_pos != evlist->is_pos)
                        return false;
@@ -1420,7 +1508,7 @@ u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
        if (evlist->combined_sample_type)
                return evlist->combined_sample_type;
 
-       evlist__for_each(evlist, evsel)
+       evlist__for_each_entry(evlist, evsel)
                evlist->combined_sample_type |= evsel->attr.sample_type;
 
        return evlist->combined_sample_type;
@@ -1437,7 +1525,7 @@ u64 perf_evlist__combined_branch_type(struct perf_evlist *evlist)
        struct perf_evsel *evsel;
        u64 branch_type = 0;
 
-       evlist__for_each(evlist, evsel)
+       evlist__for_each_entry(evlist, evsel)
                branch_type |= evsel->attr.branch_sample_type;
        return branch_type;
 }
@@ -1448,7 +1536,7 @@ bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
        u64 read_format = first->attr.read_format;
        u64 sample_type = first->attr.sample_type;
 
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                if (read_format != pos->attr.read_format)
                        return false;
        }
@@ -1505,7 +1593,7 @@ bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
 {
        struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
 
-       evlist__for_each_continue(evlist, pos) {
+       evlist__for_each_entry_continue(evlist, pos) {
                if (first->attr.sample_id_all != pos->attr.sample_id_all)
                        return false;
        }
@@ -1532,7 +1620,7 @@ void perf_evlist__close(struct perf_evlist *evlist)
        int nthreads = thread_map__nr(evlist->threads);
        int n;
 
-       evlist__for_each_reverse(evlist, evsel) {
+       evlist__for_each_entry_reverse(evlist, evsel) {
                n = evsel->cpus ? evsel->cpus->nr : ncpus;
                perf_evsel__close(evsel, n, nthreads);
        }
@@ -1586,7 +1674,7 @@ int perf_evlist__open(struct perf_evlist *evlist)
 
        perf_evlist__update_id_pos(evlist);
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                err = perf_evsel__open(evsel, evsel->cpus, evsel->threads);
                if (err < 0)
                        goto out_err;
@@ -1747,7 +1835,7 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
        struct perf_evsel *evsel;
        size_t printed = 0;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
                                   perf_evsel__name(evsel));
        }
@@ -1759,7 +1847,7 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist,
                               int err, char *buf, size_t size)
 {
        int printed, value;
-       char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+       char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
 
        switch (err) {
        case EACCES:
@@ -1811,7 +1899,7 @@ out_default:
 
 int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
 {
-       char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+       char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
        int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
 
        switch (err) {
@@ -1849,7 +1937,7 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
        if (move_evsel == perf_evlist__first(evlist))
                return;
 
-       evlist__for_each_safe(evlist, n, evsel) {
+       evlist__for_each_entry_safe(evlist, n, evsel) {
                if (evsel->leader == move_evsel->leader)
                        list_move_tail(&evsel->node, &move);
        }
@@ -1865,7 +1953,7 @@ void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
        if (tracking_evsel->tracking)
                return;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel != tracking_evsel)
                        evsel->tracking = false;
        }
@@ -1879,7 +1967,7 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (!evsel->name)
                        continue;
                if (strcmp(str, evsel->name) == 0)
@@ -1888,3 +1976,61 @@ perf_evlist__find_evsel_by_str(struct perf_evlist *evlist,
 
        return NULL;
 }
+
+void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist,
+                                 enum bkw_mmap_state state)
+{
+       enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
+       enum action {
+               NONE,
+               PAUSE,
+               RESUME,
+       } action = NONE;
+
+       if (!evlist->backward_mmap)
+               return;
+
+       switch (old_state) {
+       case BKW_MMAP_NOTREADY: {
+               if (state != BKW_MMAP_RUNNING)
+                       goto state_err;
+               break;
+       }
+       case BKW_MMAP_RUNNING: {
+               if (state != BKW_MMAP_DATA_PENDING)
+                       goto state_err;
+               action = PAUSE;
+               break;
+       }
+       case BKW_MMAP_DATA_PENDING: {
+               if (state != BKW_MMAP_EMPTY)
+                       goto state_err;
+               break;
+       }
+       case BKW_MMAP_EMPTY: {
+               if (state != BKW_MMAP_RUNNING)
+                       goto state_err;
+               action = RESUME;
+               break;
+       }
+       default:
+               WARN_ONCE(1, "Shouldn't get there\n");
+       }
+
+       evlist->bkw_mmap_state = state;
+
+       switch (action) {
+       case PAUSE:
+               perf_evlist__pause(evlist);
+               break;
+       case RESUME:
+               perf_evlist__resume(evlist);
+               break;
+       case NONE:
+       default:
+               break;
+       }
+
+state_err:
+       return;
+}
index d740fb877ab6f22ac912c9fdc972fc39f1bedc6a..4fd034f22d2fc2c2bd6b1050a42447313b9207cb 100644 (file)
@@ -35,6 +35,40 @@ struct perf_mmap {
        char             event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
 };
 
+static inline size_t
+perf_mmap__mmap_len(struct perf_mmap *map)
+{
+       return map->mask + 1 + page_size;
+}
+
+/*
+ * State machine of bkw_mmap_state:
+ *
+ *                     .________________(forbid)_____________.
+ *                     |                                     V
+ * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
+ *                     ^  ^              |   ^               |
+ *                     |  |__(forbid)____/   |___(forbid)___/|
+ *                     |                                     |
+ *                      \_________________(3)_______________/
+ *
+ * NOTREADY     : Backward ring buffers are not ready
+ * RUNNING      : Backward ring buffers are recording
+ * DATA_PENDING : We are required to collect data from backward ring buffers
+ * EMPTY        : We have collected data from backward ring buffers.
+ *
+ * (0): Setup backward ring buffer
+ * (1): Pause ring buffers for reading
+ * (2): Read from ring buffers
+ * (3): Resume ring buffers for recording
+ */
+enum bkw_mmap_state {
+       BKW_MMAP_NOTREADY,
+       BKW_MMAP_RUNNING,
+       BKW_MMAP_DATA_PENDING,
+       BKW_MMAP_EMPTY,
+};
+
 struct perf_evlist {
        struct list_head entries;
        struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
@@ -44,17 +78,18 @@ struct perf_evlist {
        bool             overwrite;
        bool             enabled;
        bool             has_user_cpus;
-       bool             backward;
        size_t           mmap_len;
        int              id_pos;
        int              is_pos;
        u64              combined_sample_type;
+       enum bkw_mmap_state bkw_mmap_state;
        struct {
                int     cork_fd;
                pid_t   pid;
        } workload;
        struct fdarray   pollfd;
        struct perf_mmap *mmap;
+       struct perf_mmap *backward_mmap;
        struct thread_map *threads;
        struct cpu_map    *cpus;
        struct perf_evsel *selected;
@@ -129,16 +164,24 @@ struct perf_evsel *perf_evlist__id2evsel_strict(struct perf_evlist *evlist,
 
 struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
 
+void perf_evlist__toggle_bkw_mmap(struct perf_evlist *evlist, enum bkw_mmap_state state);
+
+union perf_event *perf_mmap__read_forward(struct perf_mmap *map, bool check_messup);
+union perf_event *perf_mmap__read_backward(struct perf_mmap *map);
+
+void perf_mmap__read_catchup(struct perf_mmap *md);
+void perf_mmap__consume(struct perf_mmap *md, bool overwrite);
+
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx);
 
+union perf_event *perf_evlist__mmap_read_forward(struct perf_evlist *evlist,
+                                                int idx);
 union perf_event *perf_evlist__mmap_read_backward(struct perf_evlist *evlist,
                                                  int idx);
 void perf_evlist__mmap_read_catchup(struct perf_evlist *evlist, int idx);
 
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx);
 
-int perf_evlist__pause(struct perf_evlist *evlist);
-int perf_evlist__resume(struct perf_evlist *evlist);
 int perf_evlist__open(struct perf_evlist *evlist);
 void perf_evlist__close(struct perf_evlist *evlist);
 
@@ -249,70 +292,70 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
                           struct perf_evsel *move_evsel);
 
 /**
- * __evlist__for_each - iterate thru all the evsels
+ * __evlist__for_each_entry - iterate thru all the evsels
  * @list: list_head instance to iterate
  * @evsel: struct evsel iterator
  */
-#define __evlist__for_each(list, evsel) \
+#define __evlist__for_each_entry(list, evsel) \
         list_for_each_entry(evsel, list, node)
 
 /**
- * evlist__for_each - iterate thru all the evsels
+ * evlist__for_each_entry - iterate thru all the evsels
  * @evlist: evlist instance to iterate
  * @evsel: struct evsel iterator
  */
-#define evlist__for_each(evlist, evsel) \
-       __evlist__for_each(&(evlist)->entries, evsel)
+#define evlist__for_each_entry(evlist, evsel) \
+       __evlist__for_each_entry(&(evlist)->entries, evsel)
 
 /**
- * __evlist__for_each_continue - continue iteration thru all the evsels
+ * __evlist__for_each_entry_continue - continue iteration thru all the evsels
  * @list: list_head instance to iterate
  * @evsel: struct evsel iterator
  */
-#define __evlist__for_each_continue(list, evsel) \
+#define __evlist__for_each_entry_continue(list, evsel) \
         list_for_each_entry_continue(evsel, list, node)
 
 /**
- * evlist__for_each_continue - continue iteration thru all the evsels
+ * evlist__for_each_entry_continue - continue iteration thru all the evsels
  * @evlist: evlist instance to iterate
  * @evsel: struct evsel iterator
  */
-#define evlist__for_each_continue(evlist, evsel) \
-       __evlist__for_each_continue(&(evlist)->entries, evsel)
+#define evlist__for_each_entry_continue(evlist, evsel) \
+       __evlist__for_each_entry_continue(&(evlist)->entries, evsel)
 
 /**
- * __evlist__for_each_reverse - iterate thru all the evsels in reverse order
+ * __evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
  * @list: list_head instance to iterate
  * @evsel: struct evsel iterator
  */
-#define __evlist__for_each_reverse(list, evsel) \
+#define __evlist__for_each_entry_reverse(list, evsel) \
         list_for_each_entry_reverse(evsel, list, node)
 
 /**
- * evlist__for_each_reverse - iterate thru all the evsels in reverse order
+ * evlist__for_each_entry_reverse - iterate thru all the evsels in reverse order
  * @evlist: evlist instance to iterate
  * @evsel: struct evsel iterator
  */
-#define evlist__for_each_reverse(evlist, evsel) \
-       __evlist__for_each_reverse(&(evlist)->entries, evsel)
+#define evlist__for_each_entry_reverse(evlist, evsel) \
+       __evlist__for_each_entry_reverse(&(evlist)->entries, evsel)
 
 /**
- * __evlist__for_each_safe - safely iterate thru all the evsels
+ * __evlist__for_each_entry_safe - safely iterate thru all the evsels
  * @list: list_head instance to iterate
  * @tmp: struct evsel temp iterator
  * @evsel: struct evsel iterator
  */
-#define __evlist__for_each_safe(list, tmp, evsel) \
+#define __evlist__for_each_entry_safe(list, tmp, evsel) \
         list_for_each_entry_safe(evsel, tmp, list, node)
 
 /**
- * evlist__for_each_safe - safely iterate thru all the evsels
+ * evlist__for_each_entry_safe - safely iterate thru all the evsels
  * @evlist: evlist instance to iterate
  * @evsel: struct evsel iterator
  * @tmp: struct evsel temp iterator
  */
-#define evlist__for_each_safe(evlist, tmp, evsel) \
-       __evlist__for_each_safe(&(evlist)->entries, tmp, evsel)
+#define evlist__for_each_entry_safe(evlist, tmp, evsel) \
+       __evlist__for_each_entry_safe(&(evlist)->entries, tmp, evsel)
 
 void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
                                     struct perf_evsel *tracking_evsel);
@@ -321,4 +364,7 @@ void perf_event_attr__set_max_precise_ip(struct perf_event_attr *attr);
 
 struct perf_evsel *
 perf_evlist__find_evsel_by_str(struct perf_evlist *evlist, const char *str);
+
+struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+                                           union perf_event *event);
 #endif /* __PERF_EVLIST_H */
index 5d7037ef7d3b7d43f0b38822e4de50131ab6b4d2..8c54df61fe642d0245c72f88a3d6a300af8f5d26 100644 (file)
@@ -200,6 +200,24 @@ void perf_evsel__set_sample_id(struct perf_evsel *evsel,
        evsel->attr.read_format |= PERF_FORMAT_ID;
 }
 
+/**
+ * perf_evsel__is_function_event - Return whether given evsel is a function
+ * trace event
+ *
+ * @evsel - evsel selector to be tested
+ *
+ * Return %true if event is function trace event
+ */
+bool perf_evsel__is_function_event(struct perf_evsel *evsel)
+{
+#define FUNCTION_EVENT "ftrace:function"
+
+       return evsel->name &&
+              !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
+
+#undef FUNCTION_EVENT
+}
+
 void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
 {
@@ -572,6 +590,8 @@ void perf_evsel__config_callchain(struct perf_evsel *evsel,
 
        perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
+       attr->sample_max_stack = param->max_stack;
+
        if (param->record_mode == CALLCHAIN_LBR) {
                if (!opts->branch_stack) {
                        if (attr->exclude_user) {
@@ -635,7 +655,8 @@ static void apply_config_terms(struct perf_evsel *evsel,
        struct perf_event_attr *attr = &evsel->attr;
        struct callchain_param param;
        u32 dump_size = 0;
-       char *callgraph_buf = NULL;
+       int max_stack = 0;
+       const char *callgraph_buf = NULL;
 
        /* callgraph default */
        param.record_mode = callchain_param.record_mode;
@@ -662,6 +683,9 @@ static void apply_config_terms(struct perf_evsel *evsel,
                case PERF_EVSEL__CONFIG_TERM_STACK_USER:
                        dump_size = term->val.stack_user;
                        break;
+               case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
+                       max_stack = term->val.max_stack;
+                       break;
                case PERF_EVSEL__CONFIG_TERM_INHERIT:
                        /*
                         * attr->inherit should has already been set by
@@ -671,13 +695,21 @@ static void apply_config_terms(struct perf_evsel *evsel,
                         */
                        attr->inherit = term->val.inherit ? 1 : 0;
                        break;
+               case PERF_EVSEL__CONFIG_TERM_OVERWRITE:
+                       attr->write_backward = term->val.overwrite ? 1 : 0;
+                       break;
                default:
                        break;
                }
        }
 
        /* User explicitly set per-event callgraph, clear the old setting and reset. */
-       if ((callgraph_buf != NULL) || (dump_size > 0)) {
+       if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
+               if (max_stack) {
+                       param.max_stack = max_stack;
+                       if (callgraph_buf == NULL)
+                               callgraph_buf = "fp";
+               }
 
                /* parse callgraph parameters */
                if (callgraph_buf != NULL) {
@@ -747,6 +779,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
 
        attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
        attr->inherit       = !opts->no_inherit;
+       attr->write_backward = opts->overwrite ? 1 : 0;
 
        perf_evsel__set_sample_bit(evsel, IP);
        perf_evsel__set_sample_bit(evsel, TID);
@@ -1329,6 +1362,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
        PRINT_ATTRf(clockid, p_signed);
        PRINT_ATTRf(sample_regs_intr, p_hex);
        PRINT_ATTRf(aux_watermark, p_unsigned);
+       PRINT_ATTRf(sample_max_stack, p_unsigned);
 
        return ret;
 }
@@ -1347,6 +1381,9 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
        int pid = -1, err;
        enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
 
+       if (perf_missing_features.write_backward && evsel->attr.write_backward)
+               return -EINVAL;
+
        if (evsel->system_wide)
                nthreads = 1;
        else
@@ -1377,8 +1414,6 @@ fallback_missing_features:
        if (perf_missing_features.lbr_flags)
                evsel->attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
                                     PERF_SAMPLE_BRANCH_NO_CYCLES);
-       if (perf_missing_features.write_backward)
-               evsel->attr.write_backward = false;
 retry_sample_id:
        if (perf_missing_features.sample_id_all)
                evsel->attr.sample_id_all = 0;
@@ -1441,12 +1476,6 @@ retry_open:
                                err = -EINVAL;
                                goto out_close;
                        }
-
-                       if (evsel->overwrite &&
-                           perf_missing_features.write_backward) {
-                               err = -EINVAL;
-                               goto out_close;
-                       }
                }
        }
 
@@ -1484,7 +1513,10 @@ try_fallback:
         * Must probe features in the order they were added to the
         * perf_event_attr interface.
         */
-       if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
+       if (!perf_missing_features.write_backward && evsel->attr.write_backward) {
+               perf_missing_features.write_backward = true;
+               goto out_close;
+       } else if (!perf_missing_features.clockid_wrong && evsel->attr.use_clockid) {
                perf_missing_features.clockid_wrong = true;
                goto fallback_missing_features;
        } else if (!perf_missing_features.clockid && evsel->attr.use_clockid) {
@@ -1509,12 +1541,7 @@ try_fallback:
                          PERF_SAMPLE_BRANCH_NO_FLAGS))) {
                perf_missing_features.lbr_flags = true;
                goto fallback_missing_features;
-       } else if (!perf_missing_features.write_backward &&
-                       evsel->attr.write_backward) {
-               perf_missing_features.write_backward = true;
-               goto fallback_missing_features;
        }
-
 out_close:
        do {
                while (--thread >= 0) {
@@ -2239,17 +2266,11 @@ void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
        return sample->raw_data + offset;
 }
 
-u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
-                      const char *name)
+u64 format_field__intval(struct format_field *field, struct perf_sample *sample,
+                        bool needs_swap)
 {
-       struct format_field *field = perf_evsel__field(evsel, name);
-       void *ptr;
        u64 value;
-
-       if (!field)
-               return 0;
-
-       ptr = sample->raw_data + field->offset;
+       void *ptr = sample->raw_data + field->offset;
 
        switch (field->size) {
        case 1:
@@ -2267,7 +2288,7 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
                return 0;
        }
 
-       if (!evsel->needs_swap)
+       if (!needs_swap)
                return value;
 
        switch (field->size) {
@@ -2284,6 +2305,17 @@ u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
        return 0;
 }
 
+u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
+                      const char *name)
+{
+       struct format_field *field = perf_evsel__field(evsel, name);
+
+       if (!field)
+               return 0;
+
+       return format_field__intval(field, sample, evsel->needs_swap);
+}
+
 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                          char *msg, size_t msgsize)
 {
@@ -2372,6 +2404,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
         "No such device - did you specify an out-of-range profile CPU?");
                break;
        case EOPNOTSUPP:
+               if (evsel->attr.sample_period != 0)
+                       return scnprintf(msg, size, "%s",
+       "PMU Hardware doesn't support sampling/overflow-interrupts.");
                if (evsel->attr.precise_ip)
                        return scnprintf(msg, size, "%s",
        "\'precise\' request may not be supported. Try removing 'p' modifier.");
@@ -2389,6 +2424,8 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
        "We found oprofile daemon running, please stop it and try again.");
                break;
        case EINVAL:
+               if (evsel->attr.write_backward && perf_missing_features.write_backward)
+                       return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
                if (perf_missing_features.clockid)
                        return scnprintf(msg, size, "clockid feature not supported.");
                if (perf_missing_features.clockid_wrong)
@@ -2402,6 +2439,13 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
        "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
        "/bin/dmesg may provide additional information.\n"
        "No CONFIG_PERF_EVENTS=y kernel support configured?",
-                        err, strerror_r(err, sbuf, sizeof(sbuf)),
+                        err, str_error_r(err, sbuf, sizeof(sbuf)),
                         perf_evsel__name(evsel));
 }
+
+char *perf_evsel__env_arch(struct perf_evsel *evsel)
+{
+       if (evsel && evsel->evlist && evsel->evlist->env)
+               return evsel->evlist->env->arch;
+       return NULL;
+}
index c1f10159804ca93768337838fd0ce70aadb35d2c..8a4a6c9f14808032904314cf398ddcda2d2dc127 100644 (file)
@@ -44,6 +44,8 @@ enum {
        PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
        PERF_EVSEL__CONFIG_TERM_STACK_USER,
        PERF_EVSEL__CONFIG_TERM_INHERIT,
+       PERF_EVSEL__CONFIG_TERM_MAX_STACK,
+       PERF_EVSEL__CONFIG_TERM_OVERWRITE,
        PERF_EVSEL__CONFIG_TERM_MAX,
 };
 
@@ -56,7 +58,9 @@ struct perf_evsel_config_term {
                bool    time;
                char    *callgraph;
                u64     stack_user;
+               int     max_stack;
                bool    inherit;
+               bool    overwrite;
        } val;
 };
 
@@ -112,7 +116,6 @@ struct perf_evsel {
        bool                    tracking;
        bool                    per_pkg;
        bool                    precise_max;
-       bool                    overwrite;
        /* parse modifier helper */
        int                     exclude_GH;
        int                     nr_members;
@@ -259,6 +262,8 @@ static inline char *perf_evsel__strval(struct perf_evsel *evsel,
 
 struct format_field;
 
+u64 format_field__intval(struct format_field *field, struct perf_sample *sample, bool needs_swap);
+
 struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
 
 #define perf_evsel__match(evsel, t, c)         \
@@ -351,23 +356,7 @@ static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
        return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
 }
 
-/**
- * perf_evsel__is_function_event - Return whether given evsel is a function
- * trace event
- *
- * @evsel - evsel selector to be tested
- *
- * Return %true if event is function trace event
- */
-static inline bool perf_evsel__is_function_event(struct perf_evsel *evsel)
-{
-#define FUNCTION_EVENT "ftrace:function"
-
-       return evsel->name &&
-              !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));
-
-#undef FUNCTION_EVENT
-}
+bool perf_evsel__is_function_event(struct perf_evsel *evsel);
 
 static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel)
 {
@@ -431,4 +420,6 @@ typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
 int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
                             attr__fprintf_f attr__fprintf, void *priv);
 
+char *perf_evsel__env_arch(struct perf_evsel *evsel);
+
 #endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/group.h b/tools/perf/util/group.h
new file mode 100644 (file)
index 0000000..116debe
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef GROUP_H
+#define GROUP_H 1
+
+bool arch_topdown_check_group(bool *warn);
+void arch_topdown_group_warn(void);
+
+#endif
index 08852dde1378696d24c47c0b34bded96b9d862e8..8f0db4007282fabfe99fdcdc80de480d38e30510 100644 (file)
@@ -336,7 +336,7 @@ static int write_event_desc(int fd, struct perf_header *h __maybe_unused,
        if (ret < 0)
                return ret;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                ret = do_write(fd, &evsel->attr, sz);
                if (ret < 0)
                        return ret;
@@ -801,7 +801,7 @@ static int write_group_desc(int fd, struct perf_header *h __maybe_unused,
        if (ret < 0)
                return ret;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (perf_evsel__is_group_leader(evsel) &&
                    evsel->nr_members > 1) {
                        const char *name = evsel->group_name ?: "{anon_group}";
@@ -1306,42 +1306,19 @@ static void print_total_mem(struct perf_header *ph, int fd __maybe_unused,
 static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused,
                                FILE *fp)
 {
-       u32 nr, c, i;
-       char *str, *tmp;
-       uint64_t mem_total, mem_free;
-
-       /* nr nodes */
-       nr = ph->env.nr_numa_nodes;
-       str = ph->env.numa_nodes;
-
-       for (i = 0; i < nr; i++) {
-               /* node number */
-               c = strtoul(str, &tmp, 0);
-               if (*tmp != ':')
-                       goto error;
-
-               str = tmp + 1;
-               mem_total = strtoull(str, &tmp, 0);
-               if (*tmp != ':')
-                       goto error;
+       int i;
+       struct numa_node *n;
 
-               str = tmp + 1;
-               mem_free = strtoull(str, &tmp, 0);
-               if (*tmp != ':')
-                       goto error;
+       for (i = 0; i < ph->env.nr_numa_nodes; i++) {
+               n = &ph->env.numa_nodes[i];
 
                fprintf(fp, "# node%u meminfo  : total = %"PRIu64" kB,"
                            " free = %"PRIu64" kB\n",
-                       c, mem_total, mem_free);
+                       n->node, n->mem_total, n->mem_free);
 
-               str = tmp + 1;
-               fprintf(fp, "# node%u cpu list : %s\n", c, str);
-
-               str += strlen(str) + 1;
+               fprintf(fp, "# node%u cpu list : ", n->node);
+               cpu_map__fprintf(n->map, fp);
        }
-       return;
-error:
-       fprintf(fp, "# numa topology : not available\n");
 }
 
 static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp)
@@ -1425,7 +1402,7 @@ static void print_group_desc(struct perf_header *ph, int fd __maybe_unused,
 
        session = container_of(ph, struct perf_session, header);
 
-       evlist__for_each(session->evlist, evsel) {
+       evlist__for_each_entry(session->evlist, evsel) {
                if (perf_evsel__is_group_leader(evsel) &&
                    evsel->nr_members > 1) {
                        fprintf(fp, "# group: %s{%s", evsel->group_name ?: "",
@@ -1703,7 +1680,7 @@ perf_evlist__find_by_index(struct perf_evlist *evlist, int idx)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->idx == idx)
                        return evsel;
        }
@@ -1906,11 +1883,10 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
                                 struct perf_header *ph, int fd,
                                 void *data __maybe_unused)
 {
+       struct numa_node *nodes, *n;
        ssize_t ret;
-       u32 nr, node, i;
+       u32 nr, i;
        char *str;
-       uint64_t mem_total, mem_free;
-       struct strbuf sb;
 
        /* nr nodes */
        ret = readn(fd, &nr, sizeof(nr));
@@ -1921,47 +1897,47 @@ static int process_numa_topology(struct perf_file_section *section __maybe_unuse
                nr = bswap_32(nr);
 
        ph->env.nr_numa_nodes = nr;
-       if (strbuf_init(&sb, 256) < 0)
-               return -1;
+       nodes = zalloc(sizeof(*nodes) * nr);
+       if (!nodes)
+               return -ENOMEM;
 
        for (i = 0; i < nr; i++) {
+               n = &nodes[i];
+
                /* node number */
-               ret = readn(fd, &node, sizeof(node));
-               if (ret != sizeof(node))
+               ret = readn(fd, &n->node, sizeof(u32));
+               if (ret != sizeof(n->node))
                        goto error;
 
-               ret = readn(fd, &mem_total, sizeof(u64));
+               ret = readn(fd, &n->mem_total, sizeof(u64));
                if (ret != sizeof(u64))
                        goto error;
 
-               ret = readn(fd, &mem_free, sizeof(u64));
+               ret = readn(fd, &n->mem_free, sizeof(u64));
                if (ret != sizeof(u64))
                        goto error;
 
                if (ph->needs_swap) {
-                       node = bswap_32(node);
-                       mem_total = bswap_64(mem_total);
-                       mem_free = bswap_64(mem_free);
+                       n->node      = bswap_32(n->node);
+                       n->mem_total = bswap_64(n->mem_total);
+                       n->mem_free  = bswap_64(n->mem_free);
                }
 
-               if (strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":",
-                               node, mem_total, mem_free) < 0)
-                       goto error;
-
                str = do_read_string(fd, ph);
                if (!str)
                        goto error;
 
-               /* include a NULL character at the end */
-               if (strbuf_add(&sb, str, strlen(str) + 1) < 0)
+               n->map = cpu_map__new(str);
+               if (!n->map)
                        goto error;
+
                free(str);
        }
-       ph->env.numa_nodes = strbuf_detach(&sb, NULL);
+       ph->env.numa_nodes = nodes;
        return 0;
 
 error:
-       strbuf_release(&sb);
+       free(nodes);
        return -1;
 }
 
@@ -2075,7 +2051,7 @@ static int process_group_desc(struct perf_file_section *section __maybe_unused,
        session->evlist->nr_groups = nr_groups;
 
        i = nr = 0;
-       evlist__for_each(session->evlist, evsel) {
+       evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->idx == (int) desc[i].leader_idx) {
                        evsel->leader = evsel;
                        /* {anon_group} is a dummy name */
@@ -2383,7 +2359,7 @@ int perf_session__write_header(struct perf_session *session,
 
        lseek(fd, sizeof(f_header), SEEK_SET);
 
-       evlist__for_each(session->evlist, evsel) {
+       evlist__for_each_entry(session->evlist, evsel) {
                evsel->id_offset = lseek(fd, 0, SEEK_CUR);
                err = do_write(fd, evsel->id, evsel->ids * sizeof(u64));
                if (err < 0) {
@@ -2394,7 +2370,7 @@ int perf_session__write_header(struct perf_session *session,
 
        attr_offset = lseek(fd, 0, SEEK_CUR);
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                f_attr = (struct perf_file_attr){
                        .attr = evsel->attr,
                        .ids  = {
@@ -2828,7 +2804,7 @@ static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
 {
        struct perf_evsel *pos;
 
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
                    perf_evsel__prepare_tracepoint_event(pos, pevent))
                        return -1;
@@ -3127,7 +3103,7 @@ int perf_event__synthesize_attrs(struct perf_tool *tool,
        struct perf_evsel *evsel;
        int err = 0;
 
-       evlist__for_each(session->evlist, evsel) {
+       evlist__for_each_entry(session->evlist, evsel) {
                err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids,
                                                  evsel->id, process);
                if (err) {
index d62ccaeeadd6e71586479e6d6dc4d84fe7f716f4..2821f8d77e5208116cbd5832f9b9833d0764221e 100644 (file)
@@ -1,4 +1,6 @@
 #include "cache.h"
+#include "config.h"
+#include <stdio.h>
 #include <subcmd/help.h>
 #include "../builtin.h"
 #include "levenshtein.h"
index d1f19e0012d44d907a65ac4d1ca281eab38e9b70..a18d142cdca3cc2748d1fdad6f67dfed3a69746a 100644 (file)
@@ -79,7 +79,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
 
        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
-               hists__set_col_len(hists, HISTC_THREAD, len + 6);
+               hists__set_col_len(hists, HISTC_THREAD, len + 8);
 
        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
@@ -352,86 +352,114 @@ void hists__delete_entries(struct hists *hists)
  * histogram, sorted on item, collects periods
  */
 
-static struct hist_entry *hist_entry__new(struct hist_entry *template,
-                                         bool sample_self)
+static int hist_entry__init(struct hist_entry *he,
+                           struct hist_entry *template,
+                           bool sample_self)
 {
-       size_t callchain_size = 0;
-       struct hist_entry *he;
+       *he = *template;
 
-       if (symbol_conf.use_callchain)
-               callchain_size = sizeof(struct callchain_root);
+       if (symbol_conf.cumulate_callchain) {
+               he->stat_acc = malloc(sizeof(he->stat));
+               if (he->stat_acc == NULL)
+                       return -ENOMEM;
+               memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
+               if (!sample_self)
+                       memset(&he->stat, 0, sizeof(he->stat));
+       }
 
-       he = zalloc(sizeof(*he) + callchain_size);
+       map__get(he->ms.map);
 
-       if (he != NULL) {
-               *he = *template;
+       if (he->branch_info) {
+               /*
+                * This branch info is (a part of) allocated from
+                * sample__resolve_bstack() and will be freed after
+                * adding new entries.  So we need to save a copy.
+                */
+               he->branch_info = malloc(sizeof(*he->branch_info));
+               if (he->branch_info == NULL) {
+                       map__zput(he->ms.map);
+                       free(he->stat_acc);
+                       return -ENOMEM;
+               }
+
+               memcpy(he->branch_info, template->branch_info,
+                      sizeof(*he->branch_info));
+
+               map__get(he->branch_info->from.map);
+               map__get(he->branch_info->to.map);
+       }
 
-               if (symbol_conf.cumulate_callchain) {
-                       he->stat_acc = malloc(sizeof(he->stat));
-                       if (he->stat_acc == NULL) {
-                               free(he);
-                               return NULL;
+       if (he->mem_info) {
+               map__get(he->mem_info->iaddr.map);
+               map__get(he->mem_info->daddr.map);
+       }
+
+       if (symbol_conf.use_callchain)
+               callchain_init(he->callchain);
+
+       if (he->raw_data) {
+               he->raw_data = memdup(he->raw_data, he->raw_size);
+
+               if (he->raw_data == NULL) {
+                       map__put(he->ms.map);
+                       if (he->branch_info) {
+                               map__put(he->branch_info->from.map);
+                               map__put(he->branch_info->to.map);
+                               free(he->branch_info);
+                       }
+                       if (he->mem_info) {
+                               map__put(he->mem_info->iaddr.map);
+                               map__put(he->mem_info->daddr.map);
                        }
-                       memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
-                       if (!sample_self)
-                               memset(&he->stat, 0, sizeof(he->stat));
+                       free(he->stat_acc);
+                       return -ENOMEM;
                }
+       }
+       INIT_LIST_HEAD(&he->pairs.node);
+       thread__get(he->thread);
 
-               map__get(he->ms.map);
+       if (!symbol_conf.report_hierarchy)
+               he->leaf = true;
 
-               if (he->branch_info) {
-                       /*
-                        * This branch info is (a part of) allocated from
-                        * sample__resolve_bstack() and will be freed after
-                        * adding new entries.  So we need to save a copy.
-                        */
-                       he->branch_info = malloc(sizeof(*he->branch_info));
-                       if (he->branch_info == NULL) {
-                               map__zput(he->ms.map);
-                               free(he->stat_acc);
-                               free(he);
-                               return NULL;
-                       }
+       return 0;
+}
 
-                       memcpy(he->branch_info, template->branch_info,
-                              sizeof(*he->branch_info));
+static void *hist_entry__zalloc(size_t size)
+{
+       return zalloc(size + sizeof(struct hist_entry));
+}
 
-                       map__get(he->branch_info->from.map);
-                       map__get(he->branch_info->to.map);
-               }
+static void hist_entry__free(void *ptr)
+{
+       free(ptr);
+}
 
-               if (he->mem_info) {
-                       map__get(he->mem_info->iaddr.map);
-                       map__get(he->mem_info->daddr.map);
-               }
+static struct hist_entry_ops default_ops = {
+       .new    = hist_entry__zalloc,
+       .free   = hist_entry__free,
+};
 
-               if (symbol_conf.use_callchain)
-                       callchain_init(he->callchain);
+static struct hist_entry *hist_entry__new(struct hist_entry *template,
+                                         bool sample_self)
+{
+       struct hist_entry_ops *ops = template->ops;
+       size_t callchain_size = 0;
+       struct hist_entry *he;
+       int err = 0;
 
-               if (he->raw_data) {
-                       he->raw_data = memdup(he->raw_data, he->raw_size);
+       if (!ops)
+               ops = template->ops = &default_ops;
 
-                       if (he->raw_data == NULL) {
-                               map__put(he->ms.map);
-                               if (he->branch_info) {
-                                       map__put(he->branch_info->from.map);
-                                       map__put(he->branch_info->to.map);
-                                       free(he->branch_info);
-                               }
-                               if (he->mem_info) {
-                                       map__put(he->mem_info->iaddr.map);
-                                       map__put(he->mem_info->daddr.map);
-                               }
-                               free(he->stat_acc);
-                               free(he);
-                               return NULL;
-                       }
-               }
-               INIT_LIST_HEAD(&he->pairs.node);
-               thread__get(he->thread);
+       if (symbol_conf.use_callchain)
+               callchain_size = sizeof(struct callchain_root);
 
-               if (!symbol_conf.report_hierarchy)
-                       he->leaf = true;
+       he = ops->new(callchain_size);
+       if (he) {
+               err = hist_entry__init(he, template, sample_self);
+               if (err) {
+                       ops->free(he);
+                       he = NULL;
+               }
        }
 
        return he;
@@ -531,13 +559,15 @@ out:
        return he;
 }
 
-struct hist_entry *__hists__add_entry(struct hists *hists,
-                                     struct addr_location *al,
-                                     struct symbol *sym_parent,
-                                     struct branch_info *bi,
-                                     struct mem_info *mi,
-                                     struct perf_sample *sample,
-                                     bool sample_self)
+static struct hist_entry*
+__hists__add_entry(struct hists *hists,
+                  struct addr_location *al,
+                  struct symbol *sym_parent,
+                  struct branch_info *bi,
+                  struct mem_info *mi,
+                  struct perf_sample *sample,
+                  bool sample_self,
+                  struct hist_entry_ops *ops)
 {
        struct hist_entry entry = {
                .thread = al->thread,
@@ -564,11 +594,37 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
+               .ops = ops,
        };
 
        return hists__findnew_entry(hists, &entry, al, sample_self);
 }
 
+struct hist_entry *hists__add_entry(struct hists *hists,
+                                   struct addr_location *al,
+                                   struct symbol *sym_parent,
+                                   struct branch_info *bi,
+                                   struct mem_info *mi,
+                                   struct perf_sample *sample,
+                                   bool sample_self)
+{
+       return __hists__add_entry(hists, al, sym_parent, bi, mi,
+                                 sample, sample_self, NULL);
+}
+
+struct hist_entry *hists__add_entry_ops(struct hists *hists,
+                                       struct hist_entry_ops *ops,
+                                       struct addr_location *al,
+                                       struct symbol *sym_parent,
+                                       struct branch_info *bi,
+                                       struct mem_info *mi,
+                                       struct perf_sample *sample,
+                                       bool sample_self)
+{
+       return __hists__add_entry(hists, al, sym_parent, bi, mi,
+                                 sample, sample_self, ops);
+}
+
 static int
 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
@@ -622,8 +678,8 @@ iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al
         */
        sample->period = cost;
 
-       he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
-                               sample, true);
+       he = hists__add_entry(hists, al, iter->parent, NULL, mi,
+                             sample, true);
        if (!he)
                return -ENOMEM;
 
@@ -727,8 +783,8 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
 
-       he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
-                               sample, true);
+       he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
+                             sample, true);
        if (he == NULL)
                return -ENOMEM;
 
@@ -764,8 +820,8 @@ iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;
 
-       he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
-                               sample, true);
+       he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
+                             sample, true);
        if (he == NULL)
                return -ENOMEM;
 
@@ -825,8 +881,8 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
        struct hist_entry *he;
        int err = 0;
 
-       he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
-                               sample, true);
+       he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
+                             sample, true);
        if (he == NULL)
                return -ENOMEM;
 
@@ -900,8 +956,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                }
        }
 
-       he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
-                               sample, false);
+       he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
+                             sample, false);
        if (he == NULL)
                return -ENOMEM;
 
@@ -1043,6 +1099,8 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
 
 void hist_entry__delete(struct hist_entry *he)
 {
+       struct hist_entry_ops *ops = he->ops;
+
        thread__zput(he->thread);
        map__zput(he->ms.map);
 
@@ -1067,7 +1125,7 @@ void hist_entry__delete(struct hist_entry *he)
        free_callchain(he->callchain);
        free(he->trace_output);
        free(he->raw_data);
-       free(he);
+       ops->free(he);
 }
 
 /*
@@ -1081,7 +1139,7 @@ int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                                   struct perf_hpp_fmt *fmt, int printed)
 {
        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
-               const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));
+               const int width = fmt->width(fmt, hpp, he->hists);
                if (printed < width) {
                        advance_hpp(hpp, printed);
                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
@@ -2199,7 +2257,7 @@ size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
        struct perf_evsel *pos;
        size_t ret = 0;
 
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
                ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
        }
index 7b54ccf1b7370ab2336bd16a1c18a7c932b7e41d..49aa4fac148f996d661d782aab7861dd42e2f9dd 100644 (file)
@@ -10,6 +10,7 @@
 #include "ui/progress.h"
 
 struct hist_entry;
+struct hist_entry_ops;
 struct addr_location;
 struct symbol;
 
@@ -120,13 +121,23 @@ extern const struct hist_iter_ops hist_iter_branch;
 extern const struct hist_iter_ops hist_iter_mem;
 extern const struct hist_iter_ops hist_iter_cumulative;
 
-struct hist_entry *__hists__add_entry(struct hists *hists,
-                                     struct addr_location *al,
-                                     struct symbol *parent,
-                                     struct branch_info *bi,
-                                     struct mem_info *mi,
-                                     struct perf_sample *sample,
-                                     bool sample_self);
+struct hist_entry *hists__add_entry(struct hists *hists,
+                                   struct addr_location *al,
+                                   struct symbol *parent,
+                                   struct branch_info *bi,
+                                   struct mem_info *mi,
+                                   struct perf_sample *sample,
+                                   bool sample_self);
+
+struct hist_entry *hists__add_entry_ops(struct hists *hists,
+                                       struct hist_entry_ops *ops,
+                                       struct addr_location *al,
+                                       struct symbol *sym_parent,
+                                       struct branch_info *bi,
+                                       struct mem_info *mi,
+                                       struct perf_sample *sample,
+                                       bool sample_self);
+
 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg);
 
@@ -159,7 +170,8 @@ void events_stats__inc(struct events_stats *stats, u32 type);
 size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
 
 size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
-                     int max_cols, float min_pcnt, FILE *fp);
+                     int max_cols, float min_pcnt, FILE *fp,
+                     bool use_callchain);
 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp);
 
 void hists__filter_by_dso(struct hists *hists);
@@ -214,9 +226,9 @@ struct perf_hpp {
 struct perf_hpp_fmt {
        const char *name;
        int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
-                     struct perf_evsel *evsel);
+                     struct hists *hists);
        int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
-                    struct perf_evsel *evsel);
+                    struct hists *hists);
        int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
                     struct hist_entry *he);
        int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
diff --git a/tools/perf/util/include/asm/byteorder.h b/tools/perf/util/include/asm/byteorder.h
deleted file mode 100644 (file)
index 2a9bdc0..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <asm/types.h>
-#include "../../../../include/uapi/linux/swab.h"
diff --git a/tools/perf/util/include/asm/unistd_32.h b/tools/perf/util/include/asm/unistd_32.h
deleted file mode 100644 (file)
index 8b13789..0000000
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/tools/perf/util/include/asm/unistd_64.h b/tools/perf/util/include/asm/unistd_64.h
deleted file mode 100644 (file)
index 8b13789..0000000
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/tools/perf/util/include/linux/const.h b/tools/perf/util/include/linux/const.h
deleted file mode 100644 (file)
index c10a35e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../../include/uapi/linux/const.h"
index 9df99608556332289b499622f11a5a402e6b875a..749e6f2e37ca800b6fb4ab5009ed13cfbe2ab956 100644 (file)
@@ -422,7 +422,8 @@ static int intel_bts_get_branch_type(struct intel_bts_queue *btsq,
 }
 
 static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
-                                   struct auxtrace_buffer *buffer)
+                                   struct auxtrace_buffer *buffer,
+                                   struct thread *thread)
 {
        struct branch *branch;
        size_t sz, bsz = sizeof(struct branch);
@@ -444,6 +445,12 @@ static int intel_bts_process_buffer(struct intel_bts_queue *btsq,
                if (!branch->from && !branch->to)
                        continue;
                intel_bts_get_branch_type(btsq, branch);
+               if (btsq->bts->synth_opts.thread_stack)
+                       thread_stack__event(thread, btsq->sample_flags,
+                                           le64_to_cpu(branch->from),
+                                           le64_to_cpu(branch->to),
+                                           btsq->intel_pt_insn.length,
+                                           buffer->buffer_nr + 1);
                if (filter && !(filter & btsq->sample_flags))
                        continue;
                err = intel_bts_synth_branch_sample(btsq, branch);
@@ -507,12 +514,13 @@ static int intel_bts_process_queue(struct intel_bts_queue *btsq, u64 *timestamp)
                goto out_put;
        }
 
-       if (!btsq->bts->synth_opts.callchain && thread &&
+       if (!btsq->bts->synth_opts.callchain &&
+           !btsq->bts->synth_opts.thread_stack && thread &&
            (!old_buffer || btsq->bts->sampling_mode ||
             (btsq->bts->snapshot_mode && !buffer->consecutive)))
                thread_stack__set_trace_nr(thread, buffer->buffer_nr + 1);
 
-       err = intel_bts_process_buffer(btsq, buffer);
+       err = intel_bts_process_buffer(btsq, buffer, thread);
 
        auxtrace_buffer__drop_data(buffer);
 
@@ -777,7 +785,7 @@ static int intel_bts_synth_events(struct intel_bts *bts,
        u64 id;
        int err;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type == bts->pmu_type && evsel->ids) {
                        found = true;
                        break;
@@ -905,10 +913,14 @@ int intel_bts_process_auxtrace_info(union perf_event *event,
        if (dump_trace)
                return 0;
 
-       if (session->itrace_synth_opts && session->itrace_synth_opts->set)
+       if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
                bts->synth_opts = *session->itrace_synth_opts;
-       else
+       } else {
                itrace_synth_opts__set_default(&bts->synth_opts);
+               if (session->itrace_synth_opts)
+                       bts->synth_opts.thread_stack =
+                               session->itrace_synth_opts->thread_stack;
+       }
 
        if (bts->synth_opts.calls)
                bts->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
index 0611d619a42e645baa4dfbbecca099b38ee23f21..9b742ea8bfe8493b8220c1e34ca91ac784fd73c3 100644 (file)
@@ -7,8 +7,11 @@ $(OUTPUT)util/intel-pt-decoder/inat-tables.c: $(inat_tables_script) $(inat_table
        $(call rule_mkdir)
        @$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@ || rm -f $@
 
+# Busybox's diff doesn't have -I, avoid warning in the case
+
 $(OUTPUT)util/intel-pt-decoder/intel-pt-insn-decoder.o: util/intel-pt-decoder/intel-pt-insn-decoder.c util/intel-pt-decoder/inat.c $(OUTPUT)util/intel-pt-decoder/inat-tables.c
-       @(test -d ../../kernel -a -d ../../tools -a -d ../perf && (( \
+       @(diff -I 2>&1 | grep -q 'option requires an argument' && \
+       test -d ../../kernel -a -d ../../tools -a -d ../perf && (( \
        diff -B -I'^#include' util/intel-pt-decoder/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
        diff -B -I'^#include' util/intel-pt-decoder/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
        diff -B util/intel-pt-decoder/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
index 137196990012727781728b080cb5c4b799e3332d..551ff6f640be85fef9362034a487ff5d4ea0c807 100644 (file)
@@ -39,6 +39,7 @@
 #include "auxtrace.h"
 #include "tsc.h"
 #include "intel-pt.h"
+#include "config.h"
 
 #include "intel-pt-decoder/intel-pt-log.h"
 #include "intel-pt-decoder/intel-pt-decoder.h"
@@ -556,7 +557,7 @@ static bool intel_pt_exclude_kernel(struct intel_pt *pt)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(pt->session->evlist, evsel) {
+       evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
                    !evsel->attr.exclude_kernel)
                        return false;
@@ -572,7 +573,7 @@ static bool intel_pt_return_compression(struct intel_pt *pt)
        if (!pt->noretcomp_bit)
                return true;
 
-       evlist__for_each(pt->session->evlist, evsel) {
+       evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config) &&
                    (config & pt->noretcomp_bit))
                        return false;
@@ -592,7 +593,7 @@ static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
        for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
                config >>= 1;
 
-       evlist__for_each(pt->session->evlist, evsel) {
+       evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config))
                        return (config & pt->mtc_freq_bits) >> shift;
        }
@@ -608,7 +609,7 @@ static bool intel_pt_timeless_decoding(struct intel_pt *pt)
        if (!pt->tsc_bit || !pt->cap_user_time_zero)
                return true;
 
-       evlist__for_each(pt->session->evlist, evsel) {
+       evlist__for_each_entry(pt->session->evlist, evsel) {
                if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
                        return true;
                if (intel_pt_get_config(pt, &evsel->attr, &config)) {
@@ -625,7 +626,7 @@ static bool intel_pt_tracing_kernel(struct intel_pt *pt)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(pt->session->evlist, evsel) {
+       evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
                    !evsel->attr.exclude_kernel)
                        return true;
@@ -642,7 +643,7 @@ static bool intel_pt_have_tsc(struct intel_pt *pt)
        if (!pt->tsc_bit)
                return false;
 
-       evlist__for_each(pt->session->evlist, evsel) {
+       evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config)) {
                        if (config & pt->tsc_bit)
                                have_tsc = true;
@@ -1233,7 +1234,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
        if (!(state->type & INTEL_PT_BRANCH))
                return 0;
 
-       if (pt->synth_opts.callchain)
+       if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
                thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
                                    state->to_ip, ptq->insn_len,
                                    state->trace_nr);
@@ -1850,7 +1851,7 @@ static int intel_pt_synth_events(struct intel_pt *pt,
        u64 id;
        int err;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type == pt->pmu_type && evsel->ids) {
                        found = true;
                        break;
@@ -1930,7 +1931,7 @@ static int intel_pt_synth_events(struct intel_pt *pt,
                pt->sample_transactions = true;
                pt->transactions_id = id;
                id += 1;
-               evlist__for_each(evlist, evsel) {
+               evlist__for_each_entry(evlist, evsel) {
                        if (evsel->id && evsel->id[0] == pt->transactions_id) {
                                if (evsel->name)
                                        zfree(&evsel->name);
@@ -1968,7 +1969,7 @@ static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each_reverse(evlist, evsel) {
+       evlist__for_each_entry_reverse(evlist, evsel) {
                const char *name = perf_evsel__name(evsel);
 
                if (!strcmp(name, "sched:sched_switch"))
@@ -1982,7 +1983,7 @@ static bool intel_pt_find_switch(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.context_switch)
                        return true;
        }
@@ -2136,6 +2137,9 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
                        pt->synth_opts.branches = false;
                        pt->synth_opts.callchain = true;
                }
+               if (session->itrace_synth_opts)
+                       pt->synth_opts.thread_stack =
+                               session->itrace_synth_opts->thread_stack;
        }
 
        if (pt->synth_opts.log)
index aa6877d36858288c9027d83ab2390b82a2370fc2..020b9ca1b47ed8c2d4f0db22a4c6918b5ca5e7b6 100644 (file)
@@ -57,21 +57,21 @@ static inline struct int_node *intlist__next(struct int_node *in)
 }
 
 /**
- * intlist_for_each      - iterate over a intlist
+ * intlist__for_each_entry      - iterate over a intlist
  * @pos:       the &struct int_node to use as a loop cursor.
  * @ilist:     the &struct intlist for loop.
  */
-#define intlist__for_each(pos, ilist)  \
+#define intlist__for_each_entry(pos, ilist)    \
        for (pos = intlist__first(ilist); pos; pos = intlist__next(pos))
 
 /**
- * intlist_for_each_safe - iterate over a intlist safe against removal of
+ * intlist__for_each_entry_safe - iterate over a intlist safe against removal of
  *                         int_node
  * @pos:       the &struct int_node to use as a loop cursor.
  * @n:         another &struct int_node to use as temporary storage.
  * @ilist:     the &struct intlist for loop.
  */
-#define intlist__for_each_safe(pos, n, ilist)  \
+#define intlist__for_each_entry_safe(pos, n, ilist)    \
        for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\
             pos = n, n = intlist__next(n))
 #endif /* __PERF_INTLIST_H */
index 86afe9618bb0d917306ff3912203f81b6fee40b4..9f3305f6b6d5871ebde2c27f4ca1b2ac59953340 100644 (file)
@@ -108,7 +108,7 @@ jit_validate_events(struct perf_session *session)
        /*
         * check that all events use CLOCK_MONOTONIC
         */
-       evlist__for_each(session->evlist, evsel) {
+       evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->attr.use_clockid == 0 || evsel->attr.clockid != CLOCK_MONOTONIC)
                        return -1;
        }
index e521d1516df6eac70c6c2b3d9f925dd9708f4907..f616e4f65b6744813bc076c13dd1fd51042ad9a7 100644 (file)
@@ -1,5 +1,7 @@
-#include "cache.h"
 #include "levenshtein.h"
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
 
 /*
  * This function implements the Damerau-Levenshtein algorithm to
diff --git a/tools/perf/util/libunwind/arm64.c b/tools/perf/util/libunwind/arm64.c
new file mode 100644 (file)
index 0000000..6559bc5
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * This file sets up defines to compile arch specific binary from the
+ * generic one.
+ *
+ * The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch
+ * name and the definition of this function is included directly from
+ * 'arch/arm64/util/unwind-libunwind.c', to make sure that this function
+ * is defined no matter what arch the host is.
+ *
+ * Finally, the arch specific unwind methods are exported which will
+ * be assigned to each arm64 thread.
+ */
+
+#define REMOTE_UNWIND_LIBUNWIND
+
+/* Define arch specific functions & regs for libunwind, should be
+ * defined before including "unwind.h"
+ */
+#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arm64_reg_id(regnum)
+#define LIBUNWIND__ARCH_REG_IP PERF_REG_ARM64_PC
+#define LIBUNWIND__ARCH_REG_SP PERF_REG_ARM64_SP
+
+#include "unwind.h"
+#include "debug.h"
+#include "libunwind-aarch64.h"
+#include <../../../../arch/arm64/include/uapi/asm/perf_regs.h>
+#include "../../arch/arm64/util/unwind-libunwind.c"
+
+/* NO_LIBUNWIND_DEBUG_FRAME is a feature flag for local libunwind,
+ * assign NO_LIBUNWIND_DEBUG_FRAME_AARCH64 to it for compiling arm64
+ * unwind methods.
+ */
+#undef NO_LIBUNWIND_DEBUG_FRAME
+#ifdef NO_LIBUNWIND_DEBUG_FRAME_AARCH64
+#define NO_LIBUNWIND_DEBUG_FRAME
+#endif
+#include "util/unwind-libunwind-local.c"
+
+struct unwind_libunwind_ops *
+arm64_unwind_libunwind_ops = &_unwind_libunwind_ops;
diff --git a/tools/perf/util/libunwind/x86_32.c b/tools/perf/util/libunwind/x86_32.c
new file mode 100644 (file)
index 0000000..957ffff
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * This file sets up defines to compile arch specific binary from the
+ * generic one.
+ *
+ * The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch
+ * name and the definition of this function is included directly from
+ * 'arch/x86/util/unwind-libunwind.c', to make sure that this function
+ * is defined no matter what arch the host is.
+ *
+ * Finally, the arch specific unwind methods are exported which will
+ * be assigned to each x86 thread.
+ */
+
+#define REMOTE_UNWIND_LIBUNWIND
+
+/* Define arch specific functions & regs for libunwind, should be
+ * defined before including "unwind.h"
+ */
+#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__x86_reg_id(regnum)
+#define LIBUNWIND__ARCH_REG_IP PERF_REG_X86_IP
+#define LIBUNWIND__ARCH_REG_SP PERF_REG_X86_SP
+
+#include "unwind.h"
+#include "debug.h"
+#include "libunwind-x86.h"
+#include <../../../../arch/x86/include/uapi/asm/perf_regs.h>
+
+/* HAVE_ARCH_X86_64_SUPPORT is used in 'arch/x86/util/unwind-libunwind.c'
+ * for x86_32, we undef it to compile code for x86_32 only.
+ */
+#undef HAVE_ARCH_X86_64_SUPPORT
+#include "../../arch/x86/util/unwind-libunwind.c"
+
+/* Explicitly define NO_LIBUNWIND_DEBUG_FRAME, because non-ARM has no
+ * dwarf_find_debug_frame() function.
+ */
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+#define NO_LIBUNWIND_DEBUG_FRAME
+#endif
+#include "util/unwind-libunwind-local.c"
+
+struct unwind_libunwind_ops *
+x86_32_unwind_libunwind_ops = &_unwind_libunwind_ops;
index 33071d6159bccd9c7b011d22e9550951f745faa9..bf7216b8731ddbbeb9326181857962d54c6c3d03 100644 (file)
@@ -3,11 +3,14 @@
  * Copyright (C) 2015, Huawei Inc.
  */
 
+#include <errno.h>
 #include <limits.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include "debug.h"
 #include "llvm-utils.h"
+#include "config.h"
+#include "util.h"
 
 #define CLANG_BPF_CMD_DEFAULT_TEMPLATE                         \
                "$CLANG_EXEC -D__KERNEL__ -D__NR_CPUS__=$NR_CPUS "\
@@ -42,6 +45,8 @@ int perf_llvm_config(const char *var, const char *value)
                llvm_param.kbuild_dir = strdup(value);
        else if (!strcmp(var, "kbuild-opts"))
                llvm_param.kbuild_opts = strdup(value);
+       else if (!strcmp(var, "dump-obj"))
+               llvm_param.dump_obj = !!perf_config_bool(var, value);
        else
                return -1;
        llvm_param.user_set_param = true;
@@ -103,7 +108,7 @@ read_from_pipe(const char *cmd, void **p_buf, size_t *p_read_sz)
        file = popen(cmd, "r");
        if (!file) {
                pr_err("ERROR: unable to popen cmd: %s\n",
-                      strerror_r(errno, serr, sizeof(serr)));
+                      str_error_r(errno, serr, sizeof(serr)));
                return -EINVAL;
        }
 
@@ -137,7 +142,7 @@ read_from_pipe(const char *cmd, void **p_buf, size_t *p_read_sz)
 
        if (ferror(file)) {
                pr_err("ERROR: error occurred when reading from pipe: %s\n",
-                      strerror_r(errno, serr, sizeof(serr)));
+                      str_error_r(errno, serr, sizeof(serr)));
                err = -EIO;
                goto errout;
        }
@@ -326,6 +331,42 @@ get_kbuild_opts(char **kbuild_dir, char **kbuild_include_opts)
        pr_debug("include option is set to %s\n", *kbuild_include_opts);
 }
 
/*
 * dump_obj - write a compiled BPF object buffer next to its source file.
 * @path:    path of the LLVM source file; must end in ".c".  The object
 *           is written to the same path with the suffix rewritten to ".o".
 * @obj_buf: buffer holding the compiled object code.
 * @size:    size of @obj_buf in bytes.
 *
 * Best effort: on any failure a warning is printed and the dump is
 * skipped; the caller's in-memory compilation result is unaffected.
 */
static void
dump_obj(const char *path, void *obj_buf, size_t size)
{
	char *obj_path = strdup(path);
	FILE *fp;
	char *p;

	if (!obj_path) {
		/* fixed message grammar: was "No enough memory" */
		pr_warning("WARNING: Not enough memory, skip object dumping\n");
		return;
	}

	p = strrchr(obj_path, '.');
	if (!p || (strcmp(p, ".c") != 0)) {
		pr_warning("WARNING: invalid llvm source path: '%s', skip object dumping\n",
			   obj_path);
		goto out;
	}

	/* Turn "foo.c" into "foo.o" in place. */
	p[1] = 'o';
	fp = fopen(obj_path, "wb");
	if (!fp) {
		pr_warning("WARNING: failed to open '%s': %s, skip object dumping\n",
			   obj_path, strerror(errno));
		goto out;
	}

	pr_info("LLVM: dumping %s\n", obj_path);
	/*
	 * fwrite(ptr, 0, 1, fp) returns 0, so an empty buffer used to
	 * trigger a bogus "failed to write" warning; guard on size.
	 */
	if (size && fwrite(obj_buf, size, 1, fp) != 1)
		pr_warning("WARNING: failed to write to file '%s': %s, skip object dumping\n",
			   obj_path, strerror(errno));
	fclose(fp);
out:
	free(obj_path);
}
+
 int llvm__compile_bpf(const char *path, void **p_obj_buf,
                      size_t *p_obj_buf_sz)
 {
@@ -343,7 +384,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
        if (path[0] != '-' && realpath(path, abspath) == NULL) {
                err = errno;
                pr_err("ERROR: problems with path %s: %s\n",
-                      path, strerror_r(err, serr, sizeof(serr)));
+                      path, str_error_r(err, serr, sizeof(serr)));
                return -err;
        }
 
@@ -371,7 +412,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
        if (nr_cpus_avail <= 0) {
                pr_err(
 "WARNING:\tunable to get available CPUs in this system: %s\n"
-"        \tUse 128 instead.\n", strerror_r(errno, serr, sizeof(serr)));
+"        \tUse 128 instead.\n", str_error_r(errno, serr, sizeof(serr)));
                nr_cpus_avail = 128;
        }
        snprintf(nr_cpus_avail_str, sizeof(nr_cpus_avail_str), "%d",
@@ -411,6 +452,10 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
 
        free(kbuild_dir);
        free(kbuild_include_opts);
+
+       if (llvm_param.dump_obj)
+               dump_obj(path, obj_buf, obj_buf_sz);
+
        if (!p_obj_buf)
                free(obj_buf);
        else
index 23b9a743fe72f9f037e2527dbd42f8f18bc81b9e..9f501cef06a1c52e365ae97517e5d3d9144c4233 100644 (file)
@@ -29,6 +29,11 @@ struct llvm_param {
         * compiling. Should not be used for dynamic compiling.
         */
        const char *kbuild_opts;
+       /*
+        * Default is false. If set to true, write compiling result
+        * to object file.
+        */
+       bool dump_obj;
        /*
         * Default is false. If one of the above fields is set by user
         * explicitly then user_set_llvm is set to true. This is used
index b1772180c82078150ff31196e1862e0c76e1d086..bc2cdbd09a253078e99037fcb189737273e7f33a 100644 (file)
@@ -138,8 +138,10 @@ void machine__exit(struct machine *machine)
 
 void machine__delete(struct machine *machine)
 {
-       machine__exit(machine);
-       free(machine);
+       if (machine) {
+               machine__exit(machine);
+               free(machine);
+       }
 }
 
 void machines__init(struct machines *machines)
@@ -1353,11 +1355,16 @@ int machine__process_mmap2_event(struct machine *machine,
        if (map == NULL)
                goto out_problem_map;
 
-       thread__insert_map(thread, map);
+       ret = thread__insert_map(thread, map);
+       if (ret)
+               goto out_problem_insert;
+
        thread__put(thread);
        map__put(map);
        return 0;
 
+out_problem_insert:
+       map__put(map);
 out_problem_map:
        thread__put(thread);
 out_problem:
@@ -1403,11 +1410,16 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
        if (map == NULL)
                goto out_problem_map;
 
-       thread__insert_map(thread, map);
+       ret = thread__insert_map(thread, map);
+       if (ret)
+               goto out_problem_insert;
+
        thread__put(thread);
        map__put(map);
        return 0;
 
+out_problem_insert:
+       map__put(map);
 out_problem_map:
        thread__put(thread);
 out_problem:
index b19bcd3b7128355f67d93232b695966907a5e473..728129ac653a7e371fae4ebf13c67bd7e4d3de58 100644 (file)
@@ -15,6 +15,7 @@
 #include "debug.h"
 #include "machine.h"
 #include <linux/string.h>
+#include "unwind.h"
 
 static void __maps__insert(struct maps *maps, struct map *map);
 
@@ -311,6 +312,9 @@ int map__load(struct map *map, symbol_filter_t filter)
                        pr_warning("%.*s was updated (is prelink enabled?). "
                                "Restart the long running apps that use it!\n",
                                   (int)real_len, name);
+               } else if (filter) {
+                       pr_warning("no symbols passed the given filter.\n");
+                       return -2;      /* Empty but maybe by the filter */
                } else {
                        pr_warning("no symbols found in %s, maybe install "
                                   "a debug package?\n", name);
@@ -744,9 +748,10 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 /*
  * XXX This should not really _copy_ the maps, but refcount them.
  */
-int map_groups__clone(struct map_groups *mg,
+int map_groups__clone(struct thread *thread,
                      struct map_groups *parent, enum map_type type)
 {
+       struct map_groups *mg = thread->mg;
        int err = -ENOMEM;
        struct map *map;
        struct maps *maps = &parent->maps[type];
@@ -757,6 +762,11 @@ int map_groups__clone(struct map_groups *mg,
                struct map *new = map__clone(map);
                if (new == NULL)
                        goto out_unlock;
+
+               err = unwind__prepare_access(thread, new, NULL);
+               if (err)
+                       goto out_unlock;
+
                map_groups__insert(mg, new);
                map__put(new);
        }
index 7309d64ce39e17d74416a1af04d57249c0cf5295..d83396ceecba686450f3c398593b1824a8f8efbd 100644 (file)
@@ -194,7 +194,7 @@ struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name,
                                          struct map **mapp, symbol_filter_t filter);
 void map_groups__init(struct map_groups *mg, struct machine *machine);
 void map_groups__exit(struct map_groups *mg);
-int map_groups__clone(struct map_groups *mg,
+int map_groups__clone(struct thread *thread,
                      struct map_groups *parent, enum map_type type);
 size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
 
index 75465f89a4131bca863e389da525e1d51b90aae2..bbc368e7d1e4f7f9a53a329fac26d28785db9efc 100644 (file)
 #include "debug.h"
 #include "symbol.h"
 
+unsigned int perf_mem_events__loads_ldlat = 30;
+
 #define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
 
 struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
-       E("ldlat-loads",        "cpu/mem-loads,ldlat=30/P",     "mem-loads"),
+       E("ldlat-loads",        "cpu/mem-loads,ldlat=%u/P",     "mem-loads"),
        E("ldlat-stores",       "cpu/mem-stores/P",             "mem-stores"),
 };
 #undef E
 
 #undef E
 
+static char mem_loads_name[100];
+static bool mem_loads_name__init;
+
 char *perf_mem_events__name(int i)
 {
+       if (i == PERF_MEM_EVENTS__LOAD) {
+               if (!mem_loads_name__init) {
+                       mem_loads_name__init = true;
+                       scnprintf(mem_loads_name, sizeof(mem_loads_name),
+                                 perf_mem_events[i].name,
+                                 perf_mem_events__loads_ldlat);
+               }
+               return mem_loads_name;
+       }
+
        return (char *)perf_mem_events[i].name;
 }
 
index 5d6d93066a6e80e492634d6add9ad85f153211e6..7f69bf9d789da77cecd13e432874ca6e689e2a8a 100644 (file)
@@ -18,6 +18,7 @@ enum {
 };
 
 extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX];
+extern unsigned int perf_mem_events__loads_ldlat;
 
 int perf_mem_events__parse(const char *str);
 int perf_mem_events__init(void);
index c6fd0479f4cdb9a683c9ce9e2dcf63c69423e256..6c913c3914fb92163a6f8cabda62c0282b64c38e 100644 (file)
@@ -20,6 +20,7 @@
 #include "pmu.h"
 #include "thread_map.h"
 #include "cpumap.h"
+#include "probe-file.h"
 #include "asm/bug.h"
 
 #define MAX_NAME_LEN 100
@@ -436,7 +437,7 @@ int parse_events_add_cache(struct list_head *list, int *idx,
 }
 
 static void tracepoint_error(struct parse_events_error *e, int err,
-                            char *sys, char *name)
+                            const char *sys, const char *name)
 {
        char help[BUFSIZ];
 
@@ -466,7 +467,7 @@ static void tracepoint_error(struct parse_events_error *e, int err,
 }
 
 static int add_tracepoint(struct list_head *list, int *idx,
-                         char *sys_name, char *evt_name,
+                         const char *sys_name, const char *evt_name,
                          struct parse_events_error *err,
                          struct list_head *head_config)
 {
@@ -491,7 +492,7 @@ static int add_tracepoint(struct list_head *list, int *idx,
 }
 
 static int add_tracepoint_multi_event(struct list_head *list, int *idx,
-                                     char *sys_name, char *evt_name,
+                                     const char *sys_name, const char *evt_name,
                                      struct parse_events_error *err,
                                      struct list_head *head_config)
 {
@@ -533,7 +534,7 @@ static int add_tracepoint_multi_event(struct list_head *list, int *idx,
 }
 
 static int add_tracepoint_event(struct list_head *list, int *idx,
-                               char *sys_name, char *evt_name,
+                               const char *sys_name, const char *evt_name,
                                struct parse_events_error *err,
                                struct list_head *head_config)
 {
@@ -545,7 +546,7 @@ static int add_tracepoint_event(struct list_head *list, int *idx,
 }
 
 static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
-                                   char *sys_name, char *evt_name,
+                                   const char *sys_name, const char *evt_name,
                                    struct parse_events_error *err,
                                    struct list_head *head_config)
 {
@@ -584,7 +585,7 @@ struct __add_bpf_event_param {
        struct list_head *head_config;
 };
 
-static int add_bpf_event(struct probe_trace_event *tev, int fd,
+static int add_bpf_event(const char *group, const char *event, int fd,
                         void *_param)
 {
        LIST_HEAD(new_evsels);
@@ -595,27 +596,27 @@ static int add_bpf_event(struct probe_trace_event *tev, int fd,
        int err;
 
        pr_debug("add bpf event %s:%s and attach bpf program %d\n",
-                tev->group, tev->event, fd);
+                group, event, fd);
 
-       err = parse_events_add_tracepoint(&new_evsels, &evlist->idx, tev->group,
-                                         tev->event, evlist->error,
+       err = parse_events_add_tracepoint(&new_evsels, &evlist->idx, group,
+                                         event, evlist->error,
                                          param->head_config);
        if (err) {
                struct perf_evsel *evsel, *tmp;
 
                pr_debug("Failed to add BPF event %s:%s\n",
-                        tev->group, tev->event);
+                        group, event);
                list_for_each_entry_safe(evsel, tmp, &new_evsels, node) {
                        list_del(&evsel->node);
                        perf_evsel__delete(evsel);
                }
                return err;
        }
-       pr_debug("adding %s:%s\n", tev->group, tev->event);
+       pr_debug("adding %s:%s\n", group, event);
 
        list_for_each_entry(pos, &new_evsels, node) {
                pr_debug("adding %s:%s to %p\n",
-                        tev->group, tev->event, pos);
+                        group, event, pos);
                pos->bpf_fd = fd;
        }
        list_splice(&new_evsels, list);
@@ -661,7 +662,7 @@ int parse_events_load_bpf_obj(struct parse_events_evlist *data,
                goto errout;
        }
 
-       err = bpf__foreach_tev(obj, add_bpf_event, &param);
+       err = bpf__foreach_event(obj, add_bpf_event, &param);
        if (err) {
                snprintf(errbuf, sizeof(errbuf),
                         "Attach events in BPF object failed");
@@ -900,6 +901,9 @@ static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
        [PARSE_EVENTS__TERM_TYPE_STACKSIZE]             = "stack-size",
        [PARSE_EVENTS__TERM_TYPE_NOINHERIT]             = "no-inherit",
        [PARSE_EVENTS__TERM_TYPE_INHERIT]               = "inherit",
+       [PARSE_EVENTS__TERM_TYPE_MAX_STACK]             = "max-stack",
+       [PARSE_EVENTS__TERM_TYPE_OVERWRITE]             = "overwrite",
+       [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]           = "no-overwrite",
 };
 
 static bool config_term_shrinked;
@@ -992,9 +996,18 @@ do {                                                                          \
        case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
                CHECK_TYPE_VAL(NUM);
                break;
+       case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
+               CHECK_TYPE_VAL(NUM);
+               break;
+       case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
+               CHECK_TYPE_VAL(NUM);
+               break;
        case PARSE_EVENTS__TERM_TYPE_NAME:
                CHECK_TYPE_VAL(STR);
                break;
+       case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
+               CHECK_TYPE_VAL(NUM);
+               break;
        default:
                err->str = strdup("unknown term");
                err->idx = term->err_term;
@@ -1040,6 +1053,9 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
        case PARSE_EVENTS__TERM_TYPE_STACKSIZE:
        case PARSE_EVENTS__TERM_TYPE_INHERIT:
        case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
+       case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
+       case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
+       case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
                return config_term_common(attr, term, err);
        default:
                if (err) {
@@ -1109,6 +1125,15 @@ do {                                                             \
                case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
                        ADD_CONFIG_TERM(INHERIT, inherit, term->val.num ? 0 : 1);
                        break;
+               case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
+                       ADD_CONFIG_TERM(MAX_STACK, max_stack, term->val.num);
+                       break;
+               case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
+                       ADD_CONFIG_TERM(OVERWRITE, overwrite, term->val.num ? 1 : 0);
+                       break;
+               case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
+                       ADD_CONFIG_TERM(OVERWRITE, overwrite, term->val.num ? 0 : 1);
+                       break;
                default:
                        break;
                }
@@ -1118,7 +1143,7 @@ do {                                                              \
 }
 
 int parse_events_add_tracepoint(struct list_head *list, int *idx,
-                               char *sys, char *event,
+                               const char *sys, const char *event,
                                struct parse_events_error *err,
                                struct list_head *head_config)
 {
@@ -1388,7 +1413,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
        if (!add && get_event_modifier(&mod, str, NULL))
                return -EINVAL;
 
-       __evlist__for_each(list, evsel) {
+       __evlist__for_each_entry(list, evsel) {
                if (add && get_event_modifier(&mod, str, evsel))
                        return -EINVAL;
 
@@ -1414,7 +1439,7 @@ int parse_events_name(struct list_head *list, char *name)
 {
        struct perf_evsel *evsel;
 
-       __evlist__for_each(list, evsel) {
+       __evlist__for_each_entry(list, evsel) {
                if (!evsel->name)
                        evsel->name = strdup(name);
        }
@@ -1976,6 +2001,85 @@ static bool is_event_supported(u8 type, unsigned config)
        return ret;
 }
 
+void print_sdt_events(const char *subsys_glob, const char *event_glob,
+                     bool name_only)
+{
+       struct probe_cache *pcache;
+       struct probe_cache_entry *ent;
+       struct strlist *bidlist, *sdtlist;
+       struct strlist_config cfg = {.dont_dupstr = true};
+       struct str_node *nd, *nd2;
+       char *buf, *path, *ptr = NULL;
+       bool show_detail = false;
+       int ret;
+
+       sdtlist = strlist__new(NULL, &cfg);
+       if (!sdtlist) {
+               pr_debug("Failed to allocate new strlist for SDT\n");
+               return;
+       }
+       bidlist = build_id_cache__list_all(true);
+       if (!bidlist) {
+               pr_debug("Failed to get buildids: %d\n", errno);
+               return;
+       }
+       strlist__for_each_entry(nd, bidlist) {
+               pcache = probe_cache__new(nd->s);
+               if (!pcache)
+                       continue;
+               list_for_each_entry(ent, &pcache->entries, node) {
+                       if (!ent->sdt)
+                               continue;
+                       if (subsys_glob &&
+                           !strglobmatch(ent->pev.group, subsys_glob))
+                               continue;
+                       if (event_glob &&
+                           !strglobmatch(ent->pev.event, event_glob))
+                               continue;
+                       ret = asprintf(&buf, "%s:%s@%s", ent->pev.group,
+                                       ent->pev.event, nd->s);
+                       if (ret > 0)
+                               strlist__add(sdtlist, buf);
+               }
+               probe_cache__delete(pcache);
+       }
+       strlist__delete(bidlist);
+
+       strlist__for_each_entry(nd, sdtlist) {
+               buf = strchr(nd->s, '@');
+               if (buf)
+                       *(buf++) = '\0';
+               if (name_only) {
+                       printf("%s ", nd->s);
+                       continue;
+               }
+               nd2 = strlist__next(nd);
+               if (nd2) {
+                       ptr = strchr(nd2->s, '@');
+                       if (ptr)
+                               *ptr = '\0';
+                       if (strcmp(nd->s, nd2->s) == 0)
+                               show_detail = true;
+               }
+               if (show_detail) {
+                       path = build_id_cache__origname(buf);
+                       ret = asprintf(&buf, "%s@%s(%.12s)", nd->s, path, buf);
+                       if (ret > 0) {
+                               printf("  %-50s [%s]\n", buf, "SDT event");
+                               free(buf);
+                       }
+               } else
+                       printf("  %-50s [%s]\n", nd->s, "SDT event");
+               if (nd2) {
+                       if (strcmp(nd->s, nd2->s) != 0)
+                               show_detail = false;
+                       if (ptr)
+                               *ptr = '@';
+               }
+       }
+       strlist__delete(sdtlist);
+}
+
 int print_hwcache_events(const char *event_glob, bool name_only)
 {
        unsigned int type, op, i, evt_i = 0, evt_num = 0;
@@ -2158,6 +2262,8 @@ void print_events(const char *event_glob, bool name_only)
        }
 
        print_tracepoint_events(NULL, NULL, name_only);
+
+       print_sdt_events(NULL, NULL, name_only);
 }
 
 int parse_events__is_hardcoded_term(struct parse_events_term *term)
@@ -2322,9 +2428,9 @@ static void config_terms_list(char *buf, size_t buf_sz)
 char *parse_events_formats_error_string(char *additional_terms)
 {
        char *str;
-       /* "branch_type" is the longest name */
+       /* "no-overwrite" is the longest name */
        char static_terms[__PARSE_EVENTS__TERM_TYPE_NR *
-                         (sizeof("branch_type") - 1)];
+                         (sizeof("no-overwrite") - 1)];
 
        config_terms_list(static_terms, sizeof(static_terms));
        /* valid terms */
index d740c3ca9a1dfcd1409dfb6a31436785faeb755f..d1edbf8cc66a7704cf774c3f3fc80fee8f1a31bd 100644 (file)
@@ -68,6 +68,9 @@ enum {
        PARSE_EVENTS__TERM_TYPE_STACKSIZE,
        PARSE_EVENTS__TERM_TYPE_NOINHERIT,
        PARSE_EVENTS__TERM_TYPE_INHERIT,
+       PARSE_EVENTS__TERM_TYPE_MAX_STACK,
+       PARSE_EVENTS__TERM_TYPE_NOOVERWRITE,
+       PARSE_EVENTS__TERM_TYPE_OVERWRITE,
        __PARSE_EVENTS__TERM_TYPE_NR,
 };
 
@@ -133,7 +136,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add);
 int parse_events__modifier_group(struct list_head *list, char *event_mod);
 int parse_events_name(struct list_head *list, char *name);
 int parse_events_add_tracepoint(struct list_head *list, int *idx,
-                               char *sys, char *event,
+                               const char *sys, const char *event,
                                struct parse_events_error *error,
                                struct list_head *head_config);
 int parse_events_load_bpf(struct parse_events_evlist *data,
@@ -182,6 +185,8 @@ void print_symbol_events(const char *event_glob, unsigned type,
 void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
                             bool name_only);
 int print_hwcache_events(const char *event_glob, bool name_only);
+void print_sdt_events(const char *subsys_glob, const char *event_glob,
+                     bool name_only);
 int is_valid_tracepoint(const char *event_string);
 
 int valid_event_mount(const char *eventfs);
index 1477fbc78993c7b31d7bb1531f630cca8587d93f..7a2519435da05273febd9af1974637cbc1c5d8c7 100644 (file)
@@ -199,8 +199,11 @@ branch_type                { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE
 time                   { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_TIME); }
 call-graph             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CALLGRAPH); }
 stack-size             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_STACKSIZE); }
+max-stack              { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_MAX_STACK); }
 inherit                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_INHERIT); }
 no-inherit             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); }
+overwrite              { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_OVERWRITE); }
+no-overwrite           { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOOVERWRITE); }
 ,                      { return ','; }
 "/"                    { BEGIN(INITIAL); return '/'; }
 {name_minus}           { return str(yyscanner, PE_NAME); }
@@ -259,6 +262,7 @@ cycles-ct                                   { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
 cycles-t                                       { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
 mem-loads                                      { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
 mem-stores                                     { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
+topdown-[a-z-]+                                        { return str(yyscanner, PE_KERNEL_PMU_EVENT); }
 
 L1-dcache|l1-d|l1d|L1-data             |
 L1-icache|l1-i|l1i|L1-instruction      |
index 3bf6bf82ff2d045a8e00f5092eba260107a88408..7c7630be5a897d651bbda7eaf2e0c4addcf95a03 100644 (file)
  * which is what it's designed for.
  */
 #include "cache.h"
+#include "util.h"
+#include <limits.h>
 
 static char bad_path[] = "/bad-path/";
 /*
- * Two hacks:
+ * One hack:
  */
-
-static const char *get_perf_dir(void)
-{
-       return ".";
-}
-
 static char *get_pathname(void)
 {
        static char pathname_array[4][PATH_MAX];
@@ -54,60 +50,3 @@ char *mkpath(const char *fmt, ...)
                return bad_path;
        return cleanup_path(pathname);
 }
-
-char *perf_path(const char *fmt, ...)
-{
-       const char *perf_dir = get_perf_dir();
-       char *pathname = get_pathname();
-       va_list args;
-       unsigned len;
-
-       len = strlen(perf_dir);
-       if (len > PATH_MAX-100)
-               return bad_path;
-       memcpy(pathname, perf_dir, len);
-       if (len && perf_dir[len-1] != '/')
-               pathname[len++] = '/';
-       va_start(args, fmt);
-       len += vsnprintf(pathname + len, PATH_MAX - len, fmt, args);
-       va_end(args);
-       if (len >= PATH_MAX)
-               return bad_path;
-       return cleanup_path(pathname);
-}
-
-/* strip arbitrary amount of directory separators at end of path */
-static inline int chomp_trailing_dir_sep(const char *path, int len)
-{
-       while (len && is_dir_sep(path[len - 1]))
-               len--;
-       return len;
-}
-
-/*
- * If path ends with suffix (complete path components), returns the
- * part before suffix (sans trailing directory separators).
- * Otherwise returns NULL.
- */
-char *strip_path_suffix(const char *path, const char *suffix)
-{
-       int path_len = strlen(path), suffix_len = strlen(suffix);
-
-       while (suffix_len) {
-               if (!path_len)
-                       return NULL;
-
-               if (is_dir_sep(path[path_len - 1])) {
-                       if (!is_dir_sep(suffix[suffix_len - 1]))
-                               return NULL;
-                       path_len = chomp_trailing_dir_sep(path, path_len);
-                       suffix_len = chomp_trailing_dir_sep(suffix, suffix_len);
-               }
-               else if (path[--path_len] != suffix[--suffix_len])
-                       return NULL;
-       }
-
-       if (path_len && !is_dir_sep(path[path_len - 1]))
-               return NULL;
-       return strndup(path, chomp_trailing_dir_sep(path, path_len));
-}
index 74401a20106d1c4baf9ed495125ede61995835f4..953dc1ab2ed7bd0be442c4491c9c8a4862aa3386 100644 (file)
@@ -67,7 +67,6 @@ int e_snprintf(char *str, size_t size, const char *format, ...)
        return ret;
 }
 
-static char *synthesize_perf_probe_point(struct perf_probe_point *pp);
 static struct machine *host_machine;
 
 /* Initialize symbol maps and path of vmlinux/modules */
@@ -103,10 +102,8 @@ out:
 
 void exit_probe_symbol_maps(void)
 {
-       if (host_machine) {
-               machine__delete(host_machine);
-               host_machine = NULL;
-       }
+       machine__delete(host_machine);
+       host_machine = NULL;
        symbol__exit();
 }
 
@@ -471,7 +468,7 @@ static struct debuginfo *open_debuginfo(const char *module, bool silent)
                err = kernel_get_module_dso(module, &dso);
                if (err < 0) {
                        if (!dso || dso->load_errno == 0) {
-                               if (!strerror_r(-err, reason, STRERR_BUFSIZE))
+                               if (!str_error_r(-err, reason, STRERR_BUFSIZE))
                                        strcpy(reason, "(unknown)");
                        } else
                                dso__strerror_load(dso, reason, STRERR_BUFSIZE);
@@ -809,7 +806,7 @@ static int __show_one_line(FILE *fp, int l, bool skip, bool show_num)
 error:
        if (ferror(fp)) {
                pr_warning("File read error: %s\n",
-                          strerror_r(errno, sbuf, sizeof(sbuf)));
+                          str_error_r(errno, sbuf, sizeof(sbuf)));
                return -1;
        }
        return 0;
@@ -889,7 +886,7 @@ static int __show_line_range(struct line_range *lr, const char *module,
        fp = fopen(lr->path, "r");
        if (fp == NULL) {
                pr_warning("Failed to open %s: %s\n", lr->path,
-                          strerror_r(errno, sbuf, sizeof(sbuf)));
+                          str_error_r(errno, sbuf, sizeof(sbuf)));
                return -errno;
        }
        /* Skip to starting line number */
@@ -899,7 +896,7 @@ static int __show_line_range(struct line_range *lr, const char *module,
                        goto end;
        }
 
-       intlist__for_each(ln, lr->line_list) {
+       intlist__for_each_entry(ln, lr->line_list) {
                for (; ln->i > l; l++) {
                        ret = show_one_line(fp, l - lr->offset);
                        if (ret < 0)
@@ -983,7 +980,7 @@ static int show_available_vars_at(struct debuginfo *dinfo,
                zfree(&vl->point.symbol);
                nvars = 0;
                if (vl->vars) {
-                       strlist__for_each(node, vl->vars) {
+                       strlist__for_each_entry(node, vl->vars) {
                                var = strchr(node->s, '\t') + 1;
                                if (strfilter__compare(_filter, var)) {
                                        fprintf(stdout, "\t\t%s\n", node->s);
@@ -1200,6 +1197,34 @@ err:
        return err;
 }
 
+static int parse_perf_probe_event_name(char **arg, struct perf_probe_event *pev)
+{
+       char *ptr;
+
+       ptr = strchr(*arg, ':');
+       if (ptr) {
+               *ptr = '\0';
+               if (!pev->sdt && !is_c_func_name(*arg))
+                       goto ng_name;
+               pev->group = strdup(*arg);
+               if (!pev->group)
+                       return -ENOMEM;
+               *arg = ptr + 1;
+       } else
+               pev->group = NULL;
+       if (!pev->sdt && !is_c_func_name(*arg)) {
+ng_name:
+               semantic_error("%s is bad for event name -it must "
+                              "follow C symbol-naming rule.\n", *arg);
+               return -EINVAL;
+       }
+       pev->event = strdup(*arg);
+       if (pev->event == NULL)
+               return -ENOMEM;
+
+       return 0;
+}
+
 /* Parse probepoint definition. */
 static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
 {
@@ -1207,33 +1232,64 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
        char *ptr, *tmp;
        char c, nc = 0;
        bool file_spec = false;
+       int ret;
+
        /*
         * <Syntax>
-        * perf probe [EVENT=]SRC[:LN|;PTN]
-        * perf probe [EVENT=]FUNC[@SRC][+OFFS|%return|:LN|;PAT]
-        *
-        * TODO:Group name support
+        * perf probe [GRP:][EVENT=]SRC[:LN|;PTN]
+        * perf probe [GRP:][EVENT=]FUNC[@SRC][+OFFS|%return|:LN|;PAT]
+        * perf probe %[GRP:]SDT_EVENT
         */
        if (!arg)
                return -EINVAL;
 
+       /*
+        * If the probe point starts with '%',
+        * or starts with "sdt_" and has a ':' but no '=',
+        * then it should be a SDT/cached probe point.
+        */
+       if (arg[0] == '%' ||
+           (!strncmp(arg, "sdt_", 4) &&
+            !!strchr(arg, ':') && !strchr(arg, '='))) {
+               pev->sdt = true;
+               if (arg[0] == '%')
+                       arg++;
+       }
+
        ptr = strpbrk(arg, ";=@+%");
+       if (pev->sdt) {
+               if (ptr) {
+                       if (*ptr != '@') {
+                               semantic_error("%s must be an SDT name.\n",
+                                              arg);
+                               return -EINVAL;
+                       }
+                       /* This must be a target file name or build id */
+                       tmp = build_id_cache__complement(ptr + 1);
+                       if (tmp) {
+                               pev->target = build_id_cache__origname(tmp);
+                               free(tmp);
+                       } else
+                               pev->target = strdup(ptr + 1);
+                       if (!pev->target)
+                               return -ENOMEM;
+                       *ptr = '\0';
+               }
+               ret = parse_perf_probe_event_name(&arg, pev);
+               if (ret == 0) {
+                       if (asprintf(&pev->point.function, "%%%s", pev->event) < 0)
+                               ret = -errno;
+               }
+               return ret;
+       }
+
        if (ptr && *ptr == '=') {       /* Event name */
                *ptr = '\0';
                tmp = ptr + 1;
-               if (strchr(arg, ':')) {
-                       semantic_error("Group name is not supported yet.\n");
-                       return -ENOTSUP;
-               }
-               if (!is_c_func_name(arg)) {
-                       semantic_error("%s is bad for event name -it must "
-                                      "follow C symbol-naming rule.\n", arg);
-                       return -EINVAL;
-               }
-               pev->event = strdup(arg);
-               if (pev->event == NULL)
-                       return -ENOMEM;
-               pev->group = NULL;
+               ret = parse_perf_probe_event_name(&arg, pev);
+               if (ret < 0)
+                       return ret;
+
                arg = tmp;
        }
 
@@ -1545,7 +1601,9 @@ bool perf_probe_event_need_dwarf(struct perf_probe_event *pev)
                return true;
 
        for (i = 0; i < pev->nargs; i++)
-               if (is_c_varname(pev->args[i].var))
+               if (is_c_varname(pev->args[i].var) ||
+                   !strcmp(pev->args[i].var, "$params") ||
+                   !strcmp(pev->args[i].var, "$vars"))
                        return true;
 
        return false;
@@ -1603,6 +1661,11 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev)
        p = strchr(argv[1], ':');
        if (p) {
                tp->module = strndup(argv[1], p - argv[1]);
+               if (!tp->module) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               tev->uprobes = (tp->module[0] == '/');
                p++;
        } else
                p = argv[1];
@@ -1712,7 +1775,7 @@ out:
 }
 
 /* Compose only probe point (not argument) */
-static char *synthesize_perf_probe_point(struct perf_probe_point *pp)
+char *synthesize_perf_probe_point(struct perf_probe_point *pp)
 {
        struct strbuf buf;
        char *tmp, *ret = NULL;
@@ -1751,30 +1814,36 @@ out:
        return ret;
 }
 
-#if 0
 char *synthesize_perf_probe_command(struct perf_probe_event *pev)
 {
-       char *buf;
-       int i, len, ret;
+       struct strbuf buf;
+       char *tmp, *ret = NULL;
+       int i;
 
-       buf = synthesize_perf_probe_point(&pev->point);
-       if (!buf)
+       if (strbuf_init(&buf, 64))
                return NULL;
+       if (pev->event)
+               if (strbuf_addf(&buf, "%s:%s=", pev->group ?: PERFPROBE_GROUP,
+                               pev->event) < 0)
+                       goto out;
+
+       tmp = synthesize_perf_probe_point(&pev->point);
+       if (!tmp || strbuf_addstr(&buf, tmp) < 0)
+               goto out;
+       free(tmp);
 
-       len = strlen(buf);
        for (i = 0; i < pev->nargs; i++) {
-               ret = e_snprintf(&buf[len], MAX_CMDLEN - len, " %s",
-                                pev->args[i].name);
-               if (ret <= 0) {
-                       free(buf);
-                       return NULL;
-               }
-               len += ret;
+               tmp = synthesize_perf_probe_arg(pev->args + i);
+               if (!tmp || strbuf_addf(&buf, " %s", tmp) < 0)
+                       goto out;
+               free(tmp);
        }
 
-       return buf;
+       ret = strbuf_detach(&buf, NULL);
+out:
+       strbuf_release(&buf);
+       return ret;
 }
-#endif
 
 static int __synthesize_probe_trace_arg_ref(struct probe_trace_arg_ref *ref,
                                            struct strbuf *buf, int depth)
@@ -2026,6 +2095,79 @@ void clear_perf_probe_event(struct perf_probe_event *pev)
        memset(pev, 0, sizeof(*pev));
 }
 
+#define strdup_or_goto(str, label)     \
+({ char *__p = NULL; if (str && !(__p = strdup(str))) goto label; __p; })
+
+static int perf_probe_point__copy(struct perf_probe_point *dst,
+                                 struct perf_probe_point *src)
+{
+       dst->file = strdup_or_goto(src->file, out_err);
+       dst->function = strdup_or_goto(src->function, out_err);
+       dst->lazy_line = strdup_or_goto(src->lazy_line, out_err);
+       dst->line = src->line;
+       dst->retprobe = src->retprobe;
+       dst->offset = src->offset;
+       return 0;
+
+out_err:
+       clear_perf_probe_point(dst);
+       return -ENOMEM;
+}
+
+static int perf_probe_arg__copy(struct perf_probe_arg *dst,
+                               struct perf_probe_arg *src)
+{
+       struct perf_probe_arg_field *field, **ppfield;
+
+       dst->name = strdup_or_goto(src->name, out_err);
+       dst->var = strdup_or_goto(src->var, out_err);
+       dst->type = strdup_or_goto(src->type, out_err);
+
+       field = src->field;
+       ppfield = &(dst->field);
+       while (field) {
+               *ppfield = zalloc(sizeof(*field));
+               if (!*ppfield)
+                       goto out_err;
+               (*ppfield)->name = strdup_or_goto(field->name, out_err);
+               (*ppfield)->index = field->index;
+               (*ppfield)->ref = field->ref;
+               field = field->next;
+               ppfield = &((*ppfield)->next);
+       }
+       return 0;
+out_err:
+       return -ENOMEM;
+}
+
+int perf_probe_event__copy(struct perf_probe_event *dst,
+                          struct perf_probe_event *src)
+{
+       int i;
+
+       dst->event = strdup_or_goto(src->event, out_err);
+       dst->group = strdup_or_goto(src->group, out_err);
+       dst->target = strdup_or_goto(src->target, out_err);
+       dst->uprobes = src->uprobes;
+
+       if (perf_probe_point__copy(&dst->point, &src->point) < 0)
+               goto out_err;
+
+       dst->args = zalloc(sizeof(struct perf_probe_arg) * src->nargs);
+       if (!dst->args)
+               goto out_err;
+       dst->nargs = src->nargs;
+
+       for (i = 0; i < src->nargs; i++)
+               if (perf_probe_arg__copy(&dst->args[i], &src->args[i]) < 0)
+                       goto out_err;
+       return 0;
+
+out_err:
+       clear_perf_probe_event(dst);
+       return -ENOMEM;
+}
+
 void clear_probe_trace_event(struct probe_trace_event *tev)
 {
        struct probe_trace_arg_ref *ref, *next;
@@ -2253,7 +2395,7 @@ static int __show_perf_probe_events(int fd, bool is_kprobe,
        if (!rawlist)
                return -ENOMEM;
 
-       strlist__for_each(ent, rawlist) {
+       strlist__for_each_entry(ent, rawlist) {
                ret = parse_probe_trace_command(ent->s, &tev);
                if (ret >= 0) {
                        if (!filter_probe_trace_event(&tev, filter))
@@ -2286,6 +2428,9 @@ int show_perf_probe_events(struct strfilter *filter)
 
        setup_pager();
 
+       if (probe_conf.cache)
+               return probe_cache__show_all_caches(filter);
+
        ret = init_probe_symbol_maps(false);
        if (ret < 0)
                return ret;
@@ -2394,17 +2539,24 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev,
        char buf[64];
        int ret;
 
-       if (pev->event)
+       /* If probe_event or trace_event already have the name, reuse it */
+       if (pev->event && !pev->sdt)
                event = pev->event;
-       else
+       else if (tev->event)
+               event = tev->event;
+       else {
+               /* Or generate new one from probe point */
                if (pev->point.function &&
                        (strncmp(pev->point.function, "0x", 2) != 0) &&
                        !strisglob(pev->point.function))
                        event = pev->point.function;
                else
                        event = tev->point.realname;
-       if (pev->group)
+       }
+       if (pev->group && !pev->sdt)
                group = pev->group;
+       else if (tev->group)
+               group = tev->group;
        else
                group = PERFPROBE_GROUP;
 
@@ -2426,40 +2578,60 @@ static int probe_trace_event__set_name(struct probe_trace_event *tev,
        return 0;
 }
 
-static int __add_probe_trace_events(struct perf_probe_event *pev,
-                                    struct probe_trace_event *tevs,
-                                    int ntevs, bool allow_suffix)
+static int __open_probe_file_and_namelist(bool uprobe,
+                                         struct strlist **namelist)
 {
-       int i, fd, ret;
-       struct probe_trace_event *tev = NULL;
-       struct strlist *namelist;
+       int fd;
 
-       fd = probe_file__open(PF_FL_RW | (pev->uprobes ? PF_FL_UPROBE : 0));
+       fd = probe_file__open(PF_FL_RW | (uprobe ? PF_FL_UPROBE : 0));
        if (fd < 0)
                return fd;
 
        /* Get current event names */
-       namelist = probe_file__get_namelist(fd);
-       if (!namelist) {
+       *namelist = probe_file__get_namelist(fd);
+       if (!(*namelist)) {
                pr_debug("Failed to get current event list.\n");
-               ret = -ENOMEM;
-               goto close_out;
+               close(fd);
+               return -ENOMEM;
        }
+       return fd;
+}
+
+static int __add_probe_trace_events(struct perf_probe_event *pev,
+                                    struct probe_trace_event *tevs,
+                                    int ntevs, bool allow_suffix)
+{
+       int i, fd[2] = {-1, -1}, up, ret;
+       struct probe_trace_event *tev = NULL;
+       struct probe_cache *cache = NULL;
+       struct strlist *namelist[2] = {NULL, NULL};
+
+       up = pev->uprobes ? 1 : 0;
+       fd[up] = __open_probe_file_and_namelist(up, &namelist[up]);
+       if (fd[up] < 0)
+               return fd[up];
 
        ret = 0;
        for (i = 0; i < ntevs; i++) {
                tev = &tevs[i];
+               up = tev->uprobes ? 1 : 0;
+               if (fd[up] == -1) {     /* Open the kprobe/uprobe_events */
+                       fd[up] = __open_probe_file_and_namelist(up,
+                                                               &namelist[up]);
+                       if (fd[up] < 0)
+                               goto close_out;
+               }
                /* Skip if the symbol is out of .text or blacklisted */
-               if (!tev->point.symbol)
+               if (!tev->point.symbol && !pev->uprobes)
                        continue;
 
                /* Set new name for tev (and update namelist) */
-               ret = probe_trace_event__set_name(tev, pev, namelist,
+               ret = probe_trace_event__set_name(tev, pev, namelist[up],
                                                  allow_suffix);
                if (ret < 0)
                        break;
 
-               ret = probe_file__add_event(fd, tev);
+               ret = probe_file__add_event(fd[up], tev);
                if (ret < 0)
                        break;
 
@@ -2473,10 +2645,21 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
        }
        if (ret == -EINVAL && pev->uprobes)
                warn_uprobe_event_compat(tev);
+       if (ret == 0 && probe_conf.cache) {
+               cache = probe_cache__new(pev->target);
+               if (!cache ||
+                   probe_cache__add_entry(cache, pev, tevs, ntevs) < 0 ||
+                   probe_cache__commit(cache) < 0)
+                       pr_warning("Failed to add event to probe cache\n");
+               probe_cache__delete(cache);
+       }
 
-       strlist__delete(namelist);
 close_out:
-       close(fd);
+       for (up = 0; up < 2; up++) {
+               strlist__delete(namelist[up]);
+               if (fd[up] >= 0)
+                       close(fd[up]);
+       }
        return ret;
 }
 
@@ -2501,9 +2684,6 @@ static int find_probe_functions(struct map *map, char *name,
        return found;
 }
 
-#define strdup_or_goto(str, label)     \
-       ({ char *__p = strdup(str); if (!__p) goto label; __p; })
-
 void __weak arch__fix_tev_from_maps(struct perf_probe_event *pev __maybe_unused,
                                struct probe_trace_event *tev __maybe_unused,
                                struct map *map __maybe_unused,
@@ -2758,12 +2938,205 @@ errout:
 
 bool __weak arch__prefers_symtab(void) { return false; }
 
+/* Concatenate two arrays */
+static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b)
+{
+       void *ret;
+
+       ret = malloc(sz_a + sz_b);
+       if (ret) {
+               memcpy(ret, a, sz_a);
+               memcpy(ret + sz_a, b, sz_b);
+       }
+       return ret;
+}
+
+static int
+concat_probe_trace_events(struct probe_trace_event **tevs, int *ntevs,
+                         struct probe_trace_event **tevs2, int ntevs2)
+{
+       struct probe_trace_event *new_tevs;
+       int ret = 0;
+
+       if (ntevs == 0) {
+               *tevs = *tevs2;
+               *ntevs = ntevs2;
+               *tevs2 = NULL;
+               return 0;
+       }
+
+       if (*ntevs + ntevs2 > probe_conf.max_probes)
+               ret = -E2BIG;
+       else {
+               /* Concatenate the array of probe_trace_event */
+               new_tevs = memcat(*tevs, (*ntevs) * sizeof(**tevs),
+                                 *tevs2, ntevs2 * sizeof(**tevs2));
+               if (!new_tevs)
+                       ret = -ENOMEM;
+               else {
+                       free(*tevs);
+                       *tevs = new_tevs;
+                       *ntevs += ntevs2;
+               }
+       }
+       if (ret < 0)
+               clear_probe_trace_events(*tevs2, ntevs2);
+       zfree(tevs2);
+
+       return ret;
+}
+
+/*
+ * Try to find probe_trace_event from given probe caches. Return the number
+ * of cached events found, if an error occurs return the error.
+ */
+static int find_cached_events(struct perf_probe_event *pev,
+                             struct probe_trace_event **tevs,
+                             const char *target)
+{
+       struct probe_cache *cache;
+       struct probe_cache_entry *entry;
+       struct probe_trace_event *tmp_tevs = NULL;
+       int ntevs = 0;
+       int ret = 0;
+
+       cache = probe_cache__new(target);
+       /* Return 0 ("not found") if the target has no probe cache. */
+       if (!cache)
+               return 0;
+
+       for_each_probe_cache_entry(entry, cache) {
+               /* Skip the cache entry which has no name */
+               if (!entry->pev.event || !entry->pev.group)
+                       continue;
+               if ((!pev->group || strglobmatch(entry->pev.group, pev->group)) &&
+                   strglobmatch(entry->pev.event, pev->event)) {
+                       ret = probe_cache_entry__get_event(entry, &tmp_tevs);
+                       if (ret > 0)
+                               ret = concat_probe_trace_events(tevs, &ntevs,
+                                                               &tmp_tevs, ret);
+                       if (ret < 0)
+                               break;
+               }
+       }
+       probe_cache__delete(cache);
+       if (ret < 0) {
+               clear_probe_trace_events(*tevs, ntevs);
+               zfree(tevs);
+       } else {
+               ret = ntevs;
+               if (ntevs > 0 && target && target[0] == '/')
+                       pev->uprobes = true;
+       }
+
+       return ret;
+}
+
+/* Try to find probe_trace_event from all probe caches */
+static int find_cached_events_all(struct perf_probe_event *pev,
+                                  struct probe_trace_event **tevs)
+{
+       struct probe_trace_event *tmp_tevs = NULL;
+       struct strlist *bidlist;
+       struct str_node *nd;
+       char *pathname;
+       int ntevs = 0;
+       int ret;
+
+       /* Get the buildid list of all valid caches */
+       bidlist = build_id_cache__list_all(true);
+       if (!bidlist) {
+               ret = -errno;
+               pr_debug("Failed to get buildids: %d\n", ret);
+               return ret;
+       }
+
+       ret = 0;
+       strlist__for_each_entry(nd, bidlist) {
+               pathname = build_id_cache__origname(nd->s);
+               ret = find_cached_events(pev, &tmp_tevs, pathname);
+               /* In the case of cnt == 0, we just skip it */
+               if (ret > 0)
+                       ret = concat_probe_trace_events(tevs, &ntevs,
+                                                       &tmp_tevs, ret);
+               free(pathname);
+               if (ret < 0)
+                       break;
+       }
+       strlist__delete(bidlist);
+
+       if (ret < 0) {
+               clear_probe_trace_events(*tevs, ntevs);
+               zfree(tevs);
+       } else
+               ret = ntevs;
+
+       return ret;
+}
+
+static int find_probe_trace_events_from_cache(struct perf_probe_event *pev,
+                                             struct probe_trace_event **tevs)
+{
+       struct probe_cache *cache;
+       struct probe_cache_entry *entry;
+       struct probe_trace_event *tev;
+       struct str_node *node;
+       int ret, i;
+
+       if (pev->sdt) {
+               /* For SDT/cached events, we use special search functions */
+               if (!pev->target)
+                       return find_cached_events_all(pev, tevs);
+               else
+                       return find_cached_events(pev, tevs, pev->target);
+       }
+       cache = probe_cache__new(pev->target);
+       if (!cache)
+               return 0;
+
+       entry = probe_cache__find(cache, pev);
+       if (!entry) {
+               /* SDT must be in the cache */
+               ret = pev->sdt ? -ENOENT : 0;
+               goto out;
+       }
+
+       ret = strlist__nr_entries(entry->tevlist);
+       if (ret > probe_conf.max_probes) {
+               pr_debug("Too many entries matched in the cache of %s\n",
+                        pev->target ? : "kernel");
+               ret = -E2BIG;
+               goto out;
+       }
+
+       *tevs = zalloc(ret * sizeof(*tev));
+       if (!*tevs) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       i = 0;
+       strlist__for_each_entry(node, entry->tevlist) {
+               tev = &(*tevs)[i++];
+               ret = parse_probe_trace_command(node->s, tev);
+               if (ret < 0)
+                       goto out;
+               /* Set the uprobes attribute as same as original */
+               tev->uprobes = pev->uprobes;
+       }
+       ret = i;
+
+out:
+       probe_cache__delete(cache);
+       return ret;
+}
+
 static int convert_to_probe_trace_events(struct perf_probe_event *pev,
                                         struct probe_trace_event **tevs)
 {
        int ret;
 
-       if (!pev->group) {
+       if (!pev->group && !pev->sdt) {
                /* Set group name if not given */
                if (!pev->uprobes) {
                        pev->group = strdup(PERFPROBE_GROUP);
@@ -2780,6 +3153,11 @@ static int convert_to_probe_trace_events(struct perf_probe_event *pev,
        if (ret > 0)
                return ret;
 
+       /* At first, we need to lookup cache entry */
+       ret = find_probe_trace_events_from_cache(pev, tevs);
+       if (ret > 0 || pev->sdt)        /* SDT can be found only in the cache */
+               return ret == 0 ? -ENOENT : ret; /* Found in probe cache */
+
        if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) {
                ret = find_probe_trace_events_from_map(pev, tevs);
                if (ret > 0)
@@ -2934,8 +3312,16 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
 
        /* Load symbols with given filter */
        available_func_filter = _filter;
-       if (map__load(map, filter_available_functions)) {
-               pr_err("Failed to load symbols in %s\n", (target) ? : "kernel");
+       ret = map__load(map, filter_available_functions);
+       if (ret) {
+               if (ret == -2) {
+                       char *str = strfilter__string(_filter);
+                       pr_err("Failed to find symbols matched to \"%s\"\n",
+                              str);
+                       free(str);
+               } else
+                       pr_err("Failed to load symbols in %s\n",
+                              (target) ? : "kernel");
                goto end;
        }
        if (!dso__sorted_by_name(map->dso, map->type))
index 5a27eb4fad05a29e518ac7b9ecad4a25fb8f4dd6..e18ea9fe63857cb7a9b382dac563fd2c8cdfbd85 100644 (file)
@@ -12,6 +12,7 @@ struct probe_conf {
        bool    show_location_range;
        bool    force_add;
        bool    no_inlines;
+       bool    cache;
        int     max_probes;
 };
 extern struct probe_conf probe_conf;
@@ -84,6 +85,7 @@ struct perf_probe_event {
        char                    *group; /* Group name */
        struct perf_probe_point point;  /* Probe point */
        int                     nargs;  /* Number of arguments */
+       bool                    sdt;    /* SDT/cached event flag */
        bool                    uprobes;        /* Uprobe event flag */
        char                    *target;        /* Target binary */
        struct perf_probe_arg   *args;  /* Arguments */
@@ -121,6 +123,10 @@ int parse_probe_trace_command(const char *cmd, struct probe_trace_event *tev);
 char *synthesize_perf_probe_command(struct perf_probe_event *pev);
 char *synthesize_probe_trace_command(struct probe_trace_event *tev);
 char *synthesize_perf_probe_arg(struct perf_probe_arg *pa);
+char *synthesize_perf_probe_point(struct perf_probe_point *pp);
+
+int perf_probe_event__copy(struct perf_probe_event *dst,
+                          struct perf_probe_event *src);
 
 /* Check the perf_probe_event needs debuginfo */
 bool perf_probe_event_need_dwarf(struct perf_probe_event *pev);
index 3fe6214970e632932cb3809dea4c36d0aa792029..9aed9c332da656c69d91693e531df87fb03007b1 100644 (file)
@@ -14,6 +14,7 @@
  * GNU General Public License for more details.
  *
  */
+#include <sys/uio.h>
 #include "util.h"
 #include "event.h"
 #include "strlist.h"
@@ -49,7 +50,7 @@ static void print_open_warning(int err, bool uprobe)
        else
                pr_warning("Failed to open %cprobe_events: %s\n",
                           uprobe ? 'u' : 'k',
-                          strerror_r(-err, sbuf, sizeof(sbuf)));
+                          str_error_r(-err, sbuf, sizeof(sbuf)));
 }
 
 static void print_both_open_warning(int kerr, int uerr)
@@ -63,9 +64,9 @@ static void print_both_open_warning(int kerr, int uerr)
        else {
                char sbuf[STRERR_BUFSIZE];
                pr_warning("Failed to open kprobe events: %s.\n",
-                          strerror_r(-kerr, sbuf, sizeof(sbuf)));
+                          str_error_r(-kerr, sbuf, sizeof(sbuf)));
                pr_warning("Failed to open uprobe events: %s.\n",
-                          strerror_r(-uerr, sbuf, sizeof(sbuf)));
+                          str_error_r(-uerr, sbuf, sizeof(sbuf)));
        }
 }
 
@@ -177,7 +178,7 @@ static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
        if (!rawlist)
                return NULL;
        sl = strlist__new(NULL, NULL);
-       strlist__for_each(ent, rawlist) {
+       strlist__for_each_entry(ent, rawlist) {
                ret = parse_probe_trace_command(ent->s, &tev);
                if (ret < 0)
                        break;
@@ -223,7 +224,7 @@ int probe_file__add_event(int fd, struct probe_trace_event *tev)
                if (write(fd, buf, strlen(buf)) < (int)strlen(buf)) {
                        ret = -errno;
                        pr_warning("Failed to write event: %s\n",
-                                  strerror_r(errno, sbuf, sizeof(sbuf)));
+                                  str_error_r(errno, sbuf, sizeof(sbuf)));
                }
        }
        free(buf);
@@ -261,7 +262,7 @@ static int __del_trace_probe_event(int fd, struct str_node *ent)
        return 0;
 error:
        pr_warning("Failed to delete event: %s\n",
-                  strerror_r(-ret, buf, sizeof(buf)));
+                  str_error_r(-ret, buf, sizeof(buf)));
        return ret;
 }
 
@@ -280,7 +281,7 @@ int probe_file__get_events(int fd, struct strfilter *filter,
        if (!namelist)
                return -ENOENT;
 
-       strlist__for_each(ent, namelist) {
+       strlist__for_each_entry(ent, namelist) {
                p = strchr(ent->s, ':');
                if ((p && strfilter__compare(filter, p + 1)) ||
                    strfilter__compare(filter, ent->s)) {
@@ -298,7 +299,7 @@ int probe_file__del_strlist(int fd, struct strlist *namelist)
        int ret = 0;
        struct str_node *ent;
 
-       strlist__for_each(ent, namelist) {
+       strlist__for_each_entry(ent, namelist) {
                ret = __del_trace_probe_event(fd, ent);
                if (ret < 0)
                        break;
@@ -324,3 +325,533 @@ int probe_file__del_events(int fd, struct strfilter *filter)
 
        return ret;
 }
+
+/* Caller must ensure to remove this entry from list */
+static void probe_cache_entry__delete(struct probe_cache_entry *entry)
+{
+       if (entry) {
+               /* Deleting an entry still linked into a cache list is a bug */
+               BUG_ON(!list_empty(&entry->node));
+
+               strlist__delete(entry->tevlist);
+               clear_perf_probe_event(&entry->pev);
+               zfree(&entry->spev);
+               free(entry);
+       }
+}
+
+/*
+ * Allocate and initialize a probe cache entry.  When @pev is non-NULL the
+ * entry also gets a synthesized command string (spev) and a deep copy of
+ * @pev; any failure releases partial state and returns NULL.
+ */
+static struct probe_cache_entry *
+probe_cache_entry__new(struct perf_probe_event *pev)
+{
+       struct probe_cache_entry *entry = zalloc(sizeof(*entry));
+
+       if (entry) {
+               INIT_LIST_HEAD(&entry->node);
+               entry->tevlist = strlist__new(NULL, NULL);
+               if (!entry->tevlist)
+                       zfree(&entry);
+               else if (pev) {
+                       entry->spev = synthesize_perf_probe_command(pev);
+                       if (!entry->spev ||
+                           perf_probe_event__copy(&entry->pev, pev) < 0) {
+                               /* node is still empty, so delete is safe */
+                               probe_cache_entry__delete(entry);
+                               return NULL;
+                       }
+               }
+       }
+
+       return entry;
+}
+
+/*
+ * Parse every cached trace-event command string of @entry into a newly
+ * allocated array at *tevs.  Returns the number of parsed events, -E2BIG
+ * when the entry holds more than probe_conf.max_probes events, or -ENOMEM.
+ *
+ * NOTE(review): a parse_probe_trace_command() failure only breaks the loop;
+ * the negative error is discarded and the partial count is returned --
+ * confirm callers tolerate this.  Also, an empty tevlist leads to
+ * zalloc(0); verify that is acceptable here.
+ */
+int probe_cache_entry__get_event(struct probe_cache_entry *entry,
+                                struct probe_trace_event **tevs)
+{
+       struct probe_trace_event *tev;
+       struct str_node *node;
+       int ret, i;
+
+       ret = strlist__nr_entries(entry->tevlist);
+       if (ret > probe_conf.max_probes)
+               return -E2BIG;
+
+       *tevs = zalloc(ret * sizeof(*tev));
+       if (!*tevs)
+               return -ENOMEM;
+
+       i = 0;
+       strlist__for_each_entry(node, entry->tevlist) {
+               tev = &(*tevs)[i++];
+               ret = parse_probe_trace_command(node->s, tev);
+               if (ret < 0)
+                       break;
+       }
+       return i;
+}
+
+/* For the kernel probe caches, pass target = NULL or DSO__NAME_KALLSYMS */
+static int probe_cache__open(struct probe_cache *pcache, const char *target)
+{
+       char cpath[PATH_MAX];
+       char sbuildid[SBUILD_ID_SIZE];
+       char *dir_name = NULL;
+       bool is_kallsyms = false;
+       int ret, fd;
+
+       if (target && build_id_cache__cached(target)) {
+               /*
+                * This is a cached buildid.  Copy at most SBUILD_ID_SIZE - 1
+                * bytes and NUL-terminate explicitly: strncpy() does not
+                * terminate the destination when the source fills the buffer.
+                */
+               strncpy(sbuildid, target, SBUILD_ID_SIZE - 1);
+               sbuildid[SBUILD_ID_SIZE - 1] = '\0';
+               dir_name = build_id_cache__linkname(sbuildid, NULL, 0);
+               goto found;
+       }
+
+       if (!target || !strcmp(target, DSO__NAME_KALLSYMS)) {
+               target = DSO__NAME_KALLSYMS;
+               is_kallsyms = true;
+               ret = sysfs__sprintf_build_id("/", sbuildid);
+       } else
+               ret = filename__sprintf_build_id(target, sbuildid);
+
+       if (ret < 0) {
+               pr_debug("Failed to get build-id from %s.\n", target);
+               return ret;
+       }
+
+       /* If we have no buildid cache, make it */
+       if (!build_id_cache__cached(sbuildid)) {
+               ret = build_id_cache__add_s(sbuildid, target,
+                                           is_kallsyms, NULL);
+               if (ret < 0) {
+                       pr_debug("Failed to add build-id cache: %s\n", target);
+                       return ret;
+               }
+       }
+
+       dir_name = build_id_cache__cachedir(sbuildid, target, is_kallsyms,
+                                           false);
+found:
+       if (!dir_name) {
+               pr_debug("Failed to get cache from %s\n", target);
+               return -ENOMEM;
+       }
+
+       /* The per-buildid cache file is <cachedir>/probes */
+       snprintf(cpath, PATH_MAX, "%s/probes", dir_name);
+       fd = open(cpath, O_CREAT | O_RDWR, 0644);
+       if (fd < 0)
+               pr_debug("Failed to open cache(%d): %s\n", fd, cpath);
+       free(dir_name);
+       pcache->fd = fd;
+
+       return fd;
+}
+
+/*
+ * Parse the on-disk cache file into pcache->entries.  Format: a line
+ * starting with '#' (perf_probe_event) or '%' (SDT event) opens a new
+ * entry; each following plain line is a trace-event command belonging to
+ * the most recent entry.
+ */
+static int probe_cache__load(struct probe_cache *pcache)
+{
+       struct probe_cache_entry *entry = NULL;
+       char buf[MAX_CMDLEN], *p;
+       int ret = 0;
+       FILE *fp;
+
+       /* dup() so that closing the FILE does not close pcache->fd */
+       fp = fdopen(dup(pcache->fd), "r");
+       if (!fp)
+               return -EINVAL;
+
+       while (!feof(fp)) {
+               if (!fgets(buf, MAX_CMDLEN, fp))
+                       break;
+               /* Strip the trailing newline kept by fgets() */
+               p = strchr(buf, '\n');
+               if (p)
+                       *p = '\0';
+               /* #perf_probe_event or %sdt_event */
+               if (buf[0] == '#' || buf[0] == '%') {
+                       entry = probe_cache_entry__new(NULL);
+                       if (!entry) {
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+                       if (buf[0] == '%')
+                               entry->sdt = true;
+                       entry->spev = strdup(buf + 1);
+                       if (entry->spev)
+                               ret = parse_perf_probe_command(buf + 1,
+                                                               &entry->pev);
+                       else
+                               ret = -ENOMEM;
+                       if (ret < 0) {
+                               probe_cache_entry__delete(entry);
+                               goto out;
+                       }
+                       list_add_tail(&entry->node, &pcache->entries);
+               } else {        /* trace_probe_event */
+                       /* A command line before any header is malformed */
+                       if (!entry) {
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       strlist__add(entry->tevlist, buf);
+               }
+       }
+out:
+       fclose(fp);
+       return ret;
+}
+
+/* Allocate an empty probe cache; fd is marked invalid until __open(). */
+static struct probe_cache *probe_cache__alloc(void)
+{
+       struct probe_cache *pcache = zalloc(sizeof(*pcache));
+
+       if (pcache) {
+               INIT_LIST_HEAD(&pcache->entries);
+               pcache->fd = -EINVAL;
+       }
+       return pcache;
+}
+
+/* Unlink and free every entry in the cache (the cache itself survives). */
+void probe_cache__purge(struct probe_cache *pcache)
+{
+       struct probe_cache_entry *entry, *n;
+
+       list_for_each_entry_safe(entry, n, &pcache->entries, node) {
+               /* Must unlink first: __delete() BUGs on a linked entry */
+               list_del_init(&entry->node);
+               probe_cache_entry__delete(entry);
+       }
+}
+
+/* Release a probe cache: purge all entries, close the cache file, free. */
+void probe_cache__delete(struct probe_cache *pcache)
+{
+       if (!pcache)
+               return;
+
+       probe_cache__purge(pcache);
+       /* fd 0 is a valid descriptor; only negative means "not open" */
+       if (pcache->fd >= 0)
+               close(pcache->fd);
+       free(pcache);
+}
+
+/*
+ * Build a probe cache for @target (a binary path, a cached build-id, or
+ * NULL/DSO__NAME_KALLSYMS for the kernel) and load its entries from disk.
+ * Returns NULL on any failure.
+ */
+struct probe_cache *probe_cache__new(const char *target)
+{
+       struct probe_cache *pcache = probe_cache__alloc();
+       int ret;
+
+       if (!pcache)
+               return NULL;
+
+       ret = probe_cache__open(pcache, target);
+       if (ret < 0) {
+               pr_debug("Cache open error: %d\n", ret);
+               goto out_err;
+       }
+
+       ret = probe_cache__load(pcache);
+       if (ret < 0) {
+               pr_debug("Cache read error: %d\n", ret);
+               goto out_err;
+       }
+
+       return pcache;
+
+out_err:
+       probe_cache__delete(pcache);
+       return NULL;
+}
+
+/* NULL-safe string equality: two NULLs are equal, NULL vs non-NULL not. */
+static bool streql(const char *a, const char *b)
+{
+       if (a == b)
+               return true;
+
+       if (!a || !b)
+               return false;
+
+       return !strcmp(a, b);
+}
+
+/*
+ * Look up a cached entry matching @pev.  For SDT events, match on
+ * event (and group, when given); otherwise match on group:event or on
+ * the synthesized command string.  Returns NULL when not found.
+ */
+struct probe_cache_entry *
+probe_cache__find(struct probe_cache *pcache, struct perf_probe_event *pev)
+{
+       struct probe_cache_entry *entry = NULL;
+       char *cmd = synthesize_perf_probe_command(pev);
+
+       if (!cmd)
+               return NULL;
+
+       for_each_probe_cache_entry(entry, pcache) {
+               if (pev->sdt) {
+                       if (entry->pev.event &&
+                           streql(entry->pev.event, pev->event) &&
+                           (!pev->group ||
+                            streql(entry->pev.group, pev->group)))
+                               goto found;
+
+                       continue;
+               }
+               /* Hit if same event name or same command-string */
+               if ((pev->event &&
+                    (streql(entry->pev.group, pev->group) &&
+                     streql(entry->pev.event, pev->event))) ||
+                   (!strcmp(entry->spev, cmd)))
+                       goto found;
+       }
+       entry = NULL;
+
+found:
+       free(cmd);
+       return entry;
+}
+
+/* Look up a cached entry by exact group/event name; NULL when not found. */
+struct probe_cache_entry *
+probe_cache__find_by_name(struct probe_cache *pcache,
+                         const char *group, const char *event)
+{
+       struct probe_cache_entry *entry = NULL;
+
+       for_each_probe_cache_entry(entry, pcache) {
+               /* Hit if same event name or same command-string */
+               if (streql(entry->pev.group, group) &&
+                   streql(entry->pev.event, event))
+                       goto found;
+       }
+       entry = NULL;
+
+found:
+       return entry;
+}
+
+/*
+ * Add (or replace) a cache entry for @pev with its @ntevs trace events.
+ * An existing matching entry is removed first.  Returns 0 on success or
+ * a negative errno.
+ */
+int probe_cache__add_entry(struct probe_cache *pcache,
+                          struct perf_probe_event *pev,
+                          struct probe_trace_event *tevs, int ntevs)
+{
+       struct probe_cache_entry *entry = NULL;
+       char *command;
+       int i, ret = 0;
+
+       if (!pcache || !pev || !tevs || ntevs <= 0) {
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       /* Remove old cache entry */
+       entry = probe_cache__find(pcache, pev);
+       if (entry) {
+               list_del_init(&entry->node);
+               probe_cache_entry__delete(entry);
+       }
+
+       ret = -ENOMEM;
+       entry = probe_cache_entry__new(pev);
+       if (!entry)
+               goto out_err;
+
+       for (i = 0; i < ntevs; i++) {
+               /* Events without a resolved symbol are not cached */
+               if (!tevs[i].point.symbol)
+                       continue;
+
+               command = synthesize_probe_trace_command(&tevs[i]);
+               if (!command)
+                       goto out_err;
+               /* strlist__add() copies the string, so free ours */
+               strlist__add(entry->tevlist, command);
+               free(command);
+       }
+       list_add_tail(&entry->node, &pcache->entries);
+       pr_debug("Added probe cache: %d\n", ntevs);
+       return 0;
+
+out_err:
+       pr_debug("Failed to add probe caches\n");
+       probe_cache_entry__delete(entry);
+       return ret;
+}
+
+#ifdef HAVE_GELF_GETNOTE_SUPPORT
+/* Location address of an SDT note, widened from its 32- or 64-bit form. */
+static unsigned long long sdt_note__get_addr(struct sdt_note *note)
+{
+       return note->bit32 ? (unsigned long long)note->addr.a32[0]
+                : (unsigned long long)note->addr.a64[0];
+}
+
+/*
+ * Scan @pathname for SDT notes and record each as a cache entry in the
+ * "sdt_<provider>" group.  Returns 0 (or the last snprintf/asprintf
+ * result) on success, a negative errno on failure.
+ */
+int probe_cache__scan_sdt(struct probe_cache *pcache, const char *pathname)
+{
+       struct probe_cache_entry *entry = NULL;
+       struct list_head sdtlist;
+       struct sdt_note *note;
+       char *buf;
+       char sdtgrp[64];
+       int ret;
+
+       INIT_LIST_HEAD(&sdtlist);
+       ret = get_sdt_note_list(&sdtlist, pathname);
+       if (ret < 0) {
+               pr_debug("Failed to get sdt note: %d\n", ret);
+               return ret;
+       }
+       list_for_each_entry(note, &sdtlist, note_list) {
+               ret = snprintf(sdtgrp, 64, "sdt_%s", note->provider);
+               if (ret < 0)
+                       break;
+               /* Try to find same-name entry */
+               entry = probe_cache__find_by_name(pcache, sdtgrp, note->name);
+               if (!entry) {
+                       entry = probe_cache_entry__new(NULL);
+                       if (!entry) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       entry->sdt = true;
+                       ret = asprintf(&entry->spev, "%s:%s=%s", sdtgrp,
+                                       note->name, note->name);
+                       if (ret < 0)
+                               break;
+                       entry->pev.event = strdup(note->name);
+                       entry->pev.group = strdup(sdtgrp);
+                       /*
+                        * A NULL event/group would later be dereferenced by
+                        * the name matchers, so fail hard on OOM here.
+                        */
+                       if (!entry->pev.event || !entry->pev.group) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       list_add_tail(&entry->node, &pcache->entries);
+               }
+               ret = asprintf(&buf, "p:%s/%s %s:0x%llx",
+                               sdtgrp, note->name, pathname,
+                               sdt_note__get_addr(note));
+               if (ret < 0)
+                       break;
+               strlist__add(entry->tevlist, buf);
+               free(buf);
+               entry = NULL;
+       }
+       /* On error, drop the half-built (or just-touched) entry */
+       if (entry) {
+               list_del_init(&entry->node);
+               probe_cache_entry__delete(entry);
+       }
+       cleanup_sdt_note_list(&sdtlist);
+       return ret;
+}
+#endif
+
+/*
+ * Append one entry to the cache file at @fd: a "#spev" or "%spev" header
+ * line followed by one line per cached trace-event command.  On a short
+ * write the file is truncated back to its pre-call size.
+ */
+static int probe_cache_entry__write(struct probe_cache_entry *entry, int fd)
+{
+       struct str_node *snode;
+       struct stat st;
+       struct iovec iov[3];
+       const char *prefix = entry->sdt ? "%" : "#";
+       int ret;
+       /* Save stat for rollback */
+       ret = fstat(fd, &st);
+       if (ret < 0)
+               return ret;
+
+       pr_debug("Writing cache: %s%s\n", prefix, entry->spev);
+       iov[0].iov_base = (void *)prefix; iov[0].iov_len = 1;
+       iov[1].iov_base = entry->spev; iov[1].iov_len = strlen(entry->spev);
+       iov[2].iov_base = (void *)"\n"; iov[2].iov_len = 1;
+       ret = writev(fd, iov, 3);
+       /* Short write (prefix + spev + newline) -> roll back */
+       if (ret < (int)iov[1].iov_len + 2)
+               goto rollback;
+
+       strlist__for_each_entry(snode, entry->tevlist) {
+               iov[0].iov_base = (void *)snode->s;
+               iov[0].iov_len = strlen(snode->s);
+               iov[1].iov_base = (void *)"\n"; iov[1].iov_len = 1;
+               ret = writev(fd, iov, 2);
+               if (ret < (int)iov[0].iov_len + 1)
+                       goto rollback;
+       }
+       return 0;
+
+rollback:
+       /* Rollback to avoid cache file corruption */
+       if (ret > 0)
+               ret = -1;
+       if (ftruncate(fd, st.st_size) < 0)
+               ret = -2;
+
+       return ret;
+}
+
+/*
+ * Rewrite the whole cache file from the in-memory entry list: rewind,
+ * truncate, then write each entry.  Returns 0 or the first error.
+ */
+int probe_cache__commit(struct probe_cache *pcache)
+{
+       struct probe_cache_entry *entry;
+       int ret = 0;
+
+       /* TBD: if we do not update existing entries, skip it */
+       /* NOTE(review): lseek() returns off_t; narrowing to int is fine
+        * here only because the target offset is 0 -- confirm. */
+       ret = lseek(pcache->fd, 0, SEEK_SET);
+       if (ret < 0)
+               goto out;
+
+       ret = ftruncate(pcache->fd, 0);
+       if (ret < 0)
+               goto out;
+
+       for_each_probe_cache_entry(entry, pcache) {
+               ret = probe_cache_entry__write(entry, pcache->fd);
+               pr_debug("Cache committed: %d\n", ret);
+               if (ret < 0)
+                       break;
+       }
+out:
+       return ret;
+}
+
+/*
+ * Match an entry against @filter using "group:event" when names exist,
+ * otherwise the raw command string.  Note: "group:event" longer than 127
+ * chars is silently truncated by snprintf before matching.
+ */
+static bool probe_cache_entry__compare(struct probe_cache_entry *entry,
+                                      struct strfilter *filter)
+{
+       char buf[128], *ptr = entry->spev;
+
+       if (entry->pev.event) {
+               snprintf(buf, 128, "%s:%s", entry->pev.group, entry->pev.event);
+               ptr = buf;
+       }
+       return strfilter__compare(filter, ptr);
+}
+
+/* Remove (and free) every cached entry matching @filter; always returns 0. */
+int probe_cache__filter_purge(struct probe_cache *pcache,
+                             struct strfilter *filter)
+{
+       struct probe_cache_entry *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, &pcache->entries, node) {
+               if (probe_cache_entry__compare(entry, filter)) {
+                       pr_info("Removed cached event: %s\n", entry->spev);
+                       list_del_init(&entry->node);
+                       probe_cache_entry__delete(entry);
+               }
+       }
+       return 0;
+}
+
+/* Print the command string of every cached entry matching @filter. */
+static int probe_cache__show_entries(struct probe_cache *pcache,
+                                    struct strfilter *filter)
+{
+       struct probe_cache_entry *entry;
+
+       for_each_probe_cache_entry(entry, pcache) {
+               if (probe_cache_entry__compare(entry, filter))
+                       printf("%s\n", entry->spev);
+       }
+       return 0;
+}
+
+/* Show all cached probes */
+int probe_cache__show_all_caches(struct strfilter *filter)
+{
+       struct probe_cache *pcache;
+       struct strlist *bidlist;
+       struct str_node *nd;
+       char *buf = strfilter__string(filter);
+
+       pr_debug("list cache with filter: %s\n", buf);
+       free(buf);
+
+       bidlist = build_id_cache__list_all(true);
+       if (!bidlist) {
+               pr_debug("Failed to get buildids: %d\n", errno);
+               return -EINVAL;
+       }
+       /* Walk every build-id in the cache and list matching probes */
+       strlist__for_each_entry(nd, bidlist) {
+               pcache = probe_cache__new(nd->s);
+               if (!pcache)
+                       continue;
+               if (!list_empty(&pcache->entries)) {
+                       /* Print the original binary name for readability */
+                       buf = build_id_cache__origname(nd->s);
+                       printf("%s (%s):\n", buf, nd->s);
+                       free(buf);
+                       probe_cache__show_entries(pcache, filter);
+               }
+               probe_cache__delete(pcache);
+       }
+       strlist__delete(bidlist);
+
+       return 0;
+}
index 18ac9cf51c3433438eb3036d340f34e5975161bc..9577b5c0b487ee6adb017555521ad6dcd497a621 100644 (file)
@@ -5,9 +5,27 @@
 #include "strfilter.h"
 #include "probe-event.h"
 
+/* Cache of probe definitions */
+struct probe_cache_entry {
+       struct list_head        node;   /* link in probe_cache.entries */
+       bool                    sdt;    /* true for SDT ('%') entries */
+       struct perf_probe_event pev;    /* parsed probe event */
+       char                    *spev;  /* synthesized command string */
+       struct strlist          *tevlist; /* trace-event command strings */
+};
+
+/* Per-buildid probe cache backed by a "probes" file */
+struct probe_cache {
+       int     fd;                     /* cache file descriptor (< 0: closed) */
+       struct list_head entries;       /* list of probe_cache_entry */
+};
+
 #define PF_FL_UPROBE   1
 #define PF_FL_RW       2
+#define for_each_probe_cache_entry(entry, pcache) \
+       list_for_each_entry(entry, &pcache->entries, node)
 
+/* probe-file.c depends on libelf */
+#ifdef HAVE_LIBELF_SUPPORT
 int probe_file__open(int flag);
 int probe_file__open_both(int *kfd, int *ufd, int flag);
 struct strlist *probe_file__get_namelist(int fd);
@@ -18,5 +36,29 @@ int probe_file__get_events(int fd, struct strfilter *filter,
                                  struct strlist *plist);
 int probe_file__del_strlist(int fd, struct strlist *namelist);
 
+int probe_cache_entry__get_event(struct probe_cache_entry *entry,
+                                struct probe_trace_event **tevs);
 
+struct probe_cache *probe_cache__new(const char *target);
+int probe_cache__add_entry(struct probe_cache *pcache,
+                          struct perf_probe_event *pev,
+                          struct probe_trace_event *tevs, int ntevs);
+int probe_cache__scan_sdt(struct probe_cache *pcache, const char *pathname);
+int probe_cache__commit(struct probe_cache *pcache);
+void probe_cache__purge(struct probe_cache *pcache);
+void probe_cache__delete(struct probe_cache *pcache);
+int probe_cache__filter_purge(struct probe_cache *pcache,
+                             struct strfilter *filter);
+struct probe_cache_entry *probe_cache__find(struct probe_cache *pcache,
+                                           struct perf_probe_event *pev);
+struct probe_cache_entry *probe_cache__find_by_name(struct probe_cache *pcache,
+                                       const char *group, const char *event);
+int probe_cache__show_all_caches(struct strfilter *filter);
+#else  /* ! HAVE_LIBELF_SUPPORT */
+static inline struct probe_cache *probe_cache__new(const char *tgt __maybe_unused)
+{
+       return NULL;
+}
+#define probe_cache__delete(pcache) do {} while (0)
+#endif
 #endif
index 1259839dbf6d4982cdb814edbc164f1439764a97..f2d9ff064e2de720247e77f9645faf94b6b4ce57 100644 (file)
@@ -381,7 +381,7 @@ formatted:
                if (ret >= 16)
                        ret = -E2BIG;
                pr_warning("Failed to convert variable type: %s\n",
-                          strerror_r(-ret, sbuf, sizeof(sbuf)));
+                          str_error_r(-ret, sbuf, sizeof(sbuf)));
                return ret;
        }
        tvar->type = strdup(buf);
@@ -809,7 +809,7 @@ static int find_lazy_match_lines(struct intlist *list,
        fp = fopen(fname, "r");
        if (!fp) {
                pr_warning("Failed to open %s: %s\n", fname,
-                          strerror_r(errno, sbuf, sizeof(sbuf)));
+                          str_error_r(errno, sbuf, sizeof(sbuf)));
                return -errno;
        }
 
index 36c6862119e32c1fabd5001b892eb9bac0bd3ed9..5065ec98049cfb65c75ade276c05fa19a7c7f0b1 100644 (file)
@@ -13,6 +13,8 @@ util/cpumap.c
 ../lib/bitmap.c
 ../lib/find_bit.c
 ../lib/hweight.c
+../lib/str_error_r.c
+../lib/vsprintf.c
 util/thread_map.c
 util/util.c
 util/xyarray.c
index 98f127abfa42a2d9b8d06b24b6c17090781c87fb..a5fbc012e3df974adcec3581386ff80b6fdedf6a 100644 (file)
@@ -2,6 +2,7 @@
 #include <structmember.h>
 #include <inttypes.h>
 #include <poll.h>
+#include <linux/err.h>
 #include "evlist.h"
 #include "evsel.h"
 #include "event.h"
@@ -47,6 +48,7 @@ PyMODINIT_FUNC initperf(void);
 
 struct pyrf_event {
        PyObject_HEAD
+       struct perf_evsel *evsel;
        struct perf_sample sample;
        union perf_event   event;
 };
@@ -288,6 +290,85 @@ static PyObject *pyrf_sample_event__repr(struct pyrf_event *pevent)
        return ret;
 }
 
+/* True when the event's evsel is a tracepoint (has raw field data). */
+static bool is_tracepoint(struct pyrf_event *pevent)
+{
+       return pevent->evsel->attr.type == PERF_TYPE_TRACEPOINT;
+}
+
+/*
+ * Convert one tracepoint field of the sample's raw data to a Python
+ * object: strings/byte arrays for array fields, longs for numbers.
+ */
+static PyObject*
+tracepoint_field(struct pyrf_event *pe, struct format_field *field)
+{
+       struct pevent *pevent = field->event->pevent;
+       void *data = pe->sample.raw_data;
+       PyObject *ret = NULL;
+       unsigned long long val;
+       unsigned int offset, len;
+
+       if (field->flags & FIELD_IS_ARRAY) {
+               offset = field->offset;
+               len    = field->size;
+               if (field->flags & FIELD_IS_DYNAMIC) {
+                       /* Dynamic arrays store (len << 16 | offset) inline */
+                       val     = pevent_read_number(pevent, data + offset, len);
+                       offset  = val;
+                       len     = offset >> 16;
+                       offset &= 0xffff;
+               }
+               if (field->flags & FIELD_IS_STRING &&
+                   is_printable_array(data + offset, len)) {
+                       ret = PyString_FromString((char *)data + offset);
+               } else {
+                       ret = PyByteArray_FromStringAndSize((const char *) data + offset, len);
+                       /*
+                        * NOTE(review): this permanently clears the STRING
+                        * flag on the shared format descriptor based on one
+                        * sample's data -- later events of this tracepoint
+                        * will always decode as bytearrays.  Confirm intended.
+                        */
+                       field->flags &= ~FIELD_IS_STRING;
+               }
+       } else {
+               val = pevent_read_number(pevent, data + field->offset,
+                                        field->size);
+               if (field->flags & FIELD_IS_POINTER)
+                       ret = PyLong_FromUnsignedLong((unsigned long) val);
+               else if (field->flags & FIELD_IS_SIGNED)
+                       ret = PyLong_FromLong((long) val);
+               else
+                       ret = PyLong_FromUnsignedLong((unsigned long) val);
+       }
+
+       return ret;
+}
+
+/*
+ * Resolve @attr_name to a tracepoint field of the event's format and
+ * return its Python value, or NULL when the format/field is unknown.
+ * The evsel's tp_format is lazily resolved from attr.config on first use.
+ */
+static PyObject*
+get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name)
+{
+       struct perf_evsel *evsel = pevent->evsel;
+       struct format_field *field;
+       PyObject *py_str, *ret = NULL;
+       const char *str;
+
+       /* PyObject_Str() returns a NEW reference: drop it before returning */
+       py_str = PyObject_Str(attr_name);
+       if (!py_str)
+               return NULL;
+       str = PyString_AsString(py_str);
+       if (!str)
+               goto out;
+
+       if (!evsel->tp_format) {
+               struct event_format *tp_format;
+
+               tp_format = trace_event__tp_format_id(evsel->attr.config);
+               if (!tp_format)
+                       goto out;
+
+               evsel->tp_format = tp_format;
+       }
+
+       field = pevent_find_any_field(evsel->tp_format, str);
+       if (field)
+               ret = tracepoint_field(pevent, field);
+out:
+       Py_DECREF(py_str);
+       return ret;
+}
+
+/*
+ * tp_getattro for sample events: try tracepoint raw-data fields first,
+ * then fall back to the generic (tp_members) attribute lookup.
+ */
+static PyObject*
+pyrf_sample_event__getattro(struct pyrf_event *pevent, PyObject *attr_name)
+{
+       PyObject *obj = NULL;
+
+       if (is_tracepoint(pevent))
+               obj = get_tracepoint_field(pevent, attr_name);
+
+       return obj ?: PyObject_GenericGetAttr((PyObject *) pevent, attr_name);
+}
+
 static PyTypeObject pyrf_sample_event__type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name        = "perf.sample_event",
@@ -296,6 +377,7 @@ static PyTypeObject pyrf_sample_event__type = {
        .tp_doc         = pyrf_sample_event__doc,
        .tp_members     = pyrf_sample_event__members,
        .tp_repr        = (reprfunc)pyrf_sample_event__repr,
+       .tp_getattro    = (getattrofunc) pyrf_sample_event__getattro,
 };
 
 static char pyrf_context_switch_event__doc[] = PyDoc_STR("perf context_switch event object.");
@@ -653,6 +735,7 @@ static int pyrf_evsel__init(struct pyrf_evsel *pevsel,
        attr.precise_ip     = precise_ip;
        attr.mmap_data      = mmap_data;
        attr.sample_id_all  = sample_id_all;
+       attr.size           = sizeof(attr);
 
        perf_evsel__init(&pevsel->evsel, &attr, idx);
        return 0;
@@ -863,13 +946,22 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
        if (event != NULL) {
                PyObject *pyevent = pyrf_event__new(event);
                struct pyrf_event *pevent = (struct pyrf_event *)pyevent;
-
-               perf_evlist__mmap_consume(evlist, cpu);
+               struct perf_evsel *evsel;
 
                if (pyevent == NULL)
                        return PyErr_NoMemory();
 
-               err = perf_evlist__parse_sample(evlist, event, &pevent->sample);
+               evsel = perf_evlist__event2evsel(evlist, event);
+               if (!evsel)
+                       return Py_None;
+
+               pevent->evsel = evsel;
+
+               err = perf_evsel__parse_sample(evsel, event, &pevent->sample);
+
+               /* Consume the even only after we parsed it out. */
+               perf_evlist__mmap_consume(evlist, cpu);
+
                if (err)
                        return PyErr_Format(PyExc_OSError,
                                            "perf: can't parse sample, err=%d", err);
@@ -957,7 +1049,7 @@ static PyObject *pyrf_evlist__item(PyObject *obj, Py_ssize_t i)
        if (i >= pevlist->evlist.nr_entries)
                return NULL;
 
-       evlist__for_each(&pevlist->evlist, pos) {
+       evlist__for_each_entry(&pevlist->evlist, pos) {
                if (i-- == 0)
                        break;
        }
@@ -1073,7 +1165,32 @@ static struct {
        { .name = NULL, },
 };
 
+/*
+ * perf.tracepoint(sys=..., name=...): return the tracepoint's config id,
+ * or -1 when the format cannot be resolved.
+ *
+ * NOTE(review): "|ss" makes both arguments optional, so sys/name may stay
+ * NULL and be passed through to trace_event__tp_format() -- confirm that
+ * helper handles NULL.
+ */
+static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel,
+                                 PyObject *args, PyObject *kwargs)
+{
+       struct event_format *tp_format;
+       static char *kwlist[] = { "sys", "name", NULL };
+       char *sys  = NULL;
+       char *name = NULL;
+
+       if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ss", kwlist,
+                                        &sys, &name))
+               return NULL;
+
+       tp_format = trace_event__tp_format(sys, name);
+       if (IS_ERR(tp_format))
+               return PyInt_FromLong(-1);
+
+       return PyInt_FromLong(tp_format->id);
+}
+
 static PyMethodDef perf__methods[] = {
+       {
+               .ml_name  = "tracepoint",
+               .ml_meth  = (PyCFunction) pyrf__tracepoint,
+               .ml_flags = METH_VARARGS | METH_KEYWORDS,
+               .ml_doc   = PyDoc_STR("Get tracepoint config.")
+       },
        { .ml_name = NULL, }
 };
 
@@ -1100,6 +1217,33 @@ PyMODINIT_FUNC initperf(void)
        Py_INCREF(&pyrf_evsel__type);
        PyModule_AddObject(module, "evsel", (PyObject*)&pyrf_evsel__type);
 
+       Py_INCREF(&pyrf_mmap_event__type);
+       PyModule_AddObject(module, "mmap_event", (PyObject *)&pyrf_mmap_event__type);
+
+       Py_INCREF(&pyrf_lost_event__type);
+       PyModule_AddObject(module, "lost_event", (PyObject *)&pyrf_lost_event__type);
+
+       Py_INCREF(&pyrf_comm_event__type);
+       PyModule_AddObject(module, "comm_event", (PyObject *)&pyrf_comm_event__type);
+
+       Py_INCREF(&pyrf_task_event__type);
+       PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
+
+       Py_INCREF(&pyrf_throttle_event__type);
+       PyModule_AddObject(module, "throttle_event", (PyObject *)&pyrf_throttle_event__type);
+
+       Py_INCREF(&pyrf_task_event__type);
+       PyModule_AddObject(module, "task_event", (PyObject *)&pyrf_task_event__type);
+
+       Py_INCREF(&pyrf_read_event__type);
+       PyModule_AddObject(module, "read_event", (PyObject *)&pyrf_read_event__type);
+
+       Py_INCREF(&pyrf_sample_event__type);
+       PyModule_AddObject(module, "sample_event", (PyObject *)&pyrf_sample_event__type);
+
+       Py_INCREF(&pyrf_context_switch_event__type);
+       PyModule_AddObject(module, "switch_event", (PyObject *)&pyrf_context_switch_event__type);
+
        Py_INCREF(&pyrf_thread_map__type);
        PyModule_AddObject(module, "thread_map", (PyObject*)&pyrf_thread_map__type);
 
index c6d4ee2de752dbc5d44d90cf0e35a96f560d660b..639d1da2f9786dee3b9e535724b5f1beaed3edda 100644 (file)
@@ -1,5 +1,7 @@
-#include "cache.h"
+#include <stdlib.h>
+#include "strbuf.h"
 #include "quote.h"
+#include "util.h"
 
 /* Help to copy the thing properly quoted for the shell safety.
  * any single quote is replaced with '\'', any exclamation point
index e1ec19146fb059cb38b6a639c49c6e8710562b44..055ca45bed992088cbe078c39ff42110b0cea690 100644 (file)
@@ -2,7 +2,6 @@
 #define __PERF_QUOTE_H
 
 #include <stddef.h>
-#include <stdio.h>
 
 /* Help to copy the thing properly quoted for the shell safety.
  * any single quote is replaced with '\'', any exclamation point
@@ -24,6 +23,8 @@
  * sq_quote() in a real application.
  */
 
+struct strbuf;
+
 int sq_quote_argv(struct strbuf *, const char **argv, size_t maxlen);
 
 #endif /* __PERF_QUOTE_H */
index abc76e3d309831182903d9159d186174962651a1..808cc45611fe9015ef113ca7484625d9fb3b3ba0 100644 (file)
@@ -35,7 +35,7 @@ DEFINE_RB_RESORT_RB(threads, strcmp(a->thread->shortname,
 
        struct rb_node *nd;
 
-       resort_rb__for_each(nd, threads) {
+       resort_rb__for_each_entry(nd, threads) {
                struct thread *t = threads_entry;
                printf("%s: %d\n", t->shortname, t->tid);
        }
@@ -123,7 +123,7 @@ static void __name##_sorted__init_entry(struct rb_node *nd,                 \
 struct __name##_sorted_entry *__name##_entry;                                  \
 struct __name##_sorted *__name = __name##_sorted__new
 
-#define resort_rb__for_each(__nd, __name)                                      \
+#define resort_rb__for_each_entry(__nd, __name)                                        \
        for (__nd = rb_first(&__name->entries);                                 \
             __name##_entry = rb_entry(__nd, struct __name##_sorted_entry,      \
                                       rb_node), __nd;                          \
index 481792c7484bd7109be072b6ea22f3fb8cf13089..98bf584853ea28e0fd9ab3828aa1e83f9d4010e5 100644 (file)
@@ -148,7 +148,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
 
        use_comm_exec = perf_can_comm_exec();
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                perf_evsel__config(evsel, opts, callchain);
                if (evsel->tracking && use_comm_exec)
                        evsel->attr.comm_exec = 1;
@@ -161,18 +161,18 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
                 * match the id.
                 */
                use_sample_identifier = perf_can_sample_identifier();
-               evlist__for_each(evlist, evsel)
+               evlist__for_each_entry(evlist, evsel)
                        perf_evsel__set_sample_id(evsel, use_sample_identifier);
        } else if (evlist->nr_entries > 1) {
                struct perf_evsel *first = perf_evlist__first(evlist);
 
-               evlist__for_each(evlist, evsel) {
+               evlist__for_each_entry(evlist, evsel) {
                        if (evsel->attr.sample_type == first->attr.sample_type)
                                continue;
                        use_sample_identifier = perf_can_sample_identifier();
                        break;
                }
-               evlist__for_each(evlist, evsel)
+               evlist__for_each_entry(evlist, evsel)
                        perf_evsel__set_sample_id(evsel, use_sample_identifier);
        }
 
index ff134700bf30dfa1f3bf772220ebb681daf077b2..e0203b97947483d82042c3a568f01c73878eb6a9 100644 (file)
@@ -273,7 +273,7 @@ static PyObject *get_field_numeric_entry(struct event_format *event,
                struct format_field *field, void *data)
 {
        bool is_array = field->flags & FIELD_IS_ARRAY;
-       PyObject *obj, *list = NULL;
+       PyObject *obj = NULL, *list = NULL;
        unsigned long long val;
        unsigned int item_size, n_items, i;
 
@@ -386,13 +386,12 @@ exit:
        return pylist;
 }
 
-
 static void python_process_tracepoint(struct perf_sample *sample,
                                      struct perf_evsel *evsel,
                                      struct addr_location *al)
 {
        struct event_format *event = evsel->tp_format;
-       PyObject *handler, *context, *t, *obj, *callchain;
+       PyObject *handler, *context, *t, *obj = NULL, *callchain;
        PyObject *dict = NULL;
        static char handler_name[256];
        struct format_field *field;
@@ -457,14 +456,26 @@ static void python_process_tracepoint(struct perf_sample *sample,
                pydict_set_item_string_decref(dict, "common_callchain", callchain);
        }
        for (field = event->format.fields; field; field = field->next) {
-               if (field->flags & FIELD_IS_STRING) {
-                       int offset;
+               unsigned int offset, len;
+               unsigned long long val;
+
+               if (field->flags & FIELD_IS_ARRAY) {
+                       offset = field->offset;
+                       len    = field->size;
                        if (field->flags & FIELD_IS_DYNAMIC) {
-                               offset = *(int *)(data + field->offset);
+                               val     = pevent_read_number(scripting_context->pevent,
+                                                            data + offset, len);
+                               offset  = val;
+                               len     = offset >> 16;
                                offset &= 0xffff;
-                       } else
-                               offset = field->offset;
-                       obj = PyString_FromString((char *)data + offset);
+                       }
+                       if (field->flags & FIELD_IS_STRING &&
+                           is_printable_array(data + offset, len)) {
+                               obj = PyString_FromString((char *) data + offset);
+                       } else {
+                               obj = PyByteArray_FromStringAndSize((const char *) data + offset, len);
+                               field->flags &= ~FIELD_IS_STRING;
+                       }
                } else { /* FIELD_IS_NUMERIC */
                        obj = get_field_numeric_entry(event, field, data);
                }
index 5214974e841a906668b2b2ba818147347dc8efaf..5d61242a6e648a1b65c8b9d16c93e02f8ed655cd 100644 (file)
@@ -83,7 +83,7 @@ static bool perf_session__has_comm_exec(struct perf_session *session)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(session->evlist, evsel) {
+       evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->attr.comm_exec)
                        return true;
        }
@@ -178,6 +178,8 @@ static void perf_session__delete_threads(struct perf_session *session)
 
 void perf_session__delete(struct perf_session *session)
 {
+       if (session == NULL)
+               return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
@@ -593,6 +595,7 @@ do {                                                \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
 } while(0)
+#define bswap_field_16(f) bswap_field(f, 16)
 #define bswap_field_32(f) bswap_field(f, 32)
 #define bswap_field_64(f) bswap_field(f, 64)
 
@@ -608,6 +611,7 @@ do {                                                \
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
+       bswap_field_16(sample_max_stack);
 
        /*
         * After read_format are bitfields. Check read_format because
@@ -1495,10 +1499,27 @@ int perf_session__register_idle_thread(struct perf_session *session)
        return err;
 }
 
+static void
+perf_session__warn_order(const struct perf_session *session)
+{
+       const struct ordered_events *oe = &session->ordered_events;
+       struct perf_evsel *evsel;
+       bool should_warn = true;
+
+       evlist__for_each_entry(session->evlist, evsel) {
+               if (evsel->attr.write_backward)
+                       should_warn = false;
+       }
+
+       if (!should_warn)
+               return;
+       if (oe->nr_unordered_events != 0)
+               ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
+}
+
 static void perf_session__warn_about_errors(const struct perf_session *session)
 {
        const struct events_stats *stats = &session->evlist->stats;
-       const struct ordered_events *oe = &session->ordered_events;
 
        if (session->tool->lost == perf_event__process_lost &&
            stats->nr_events[PERF_RECORD_LOST] != 0) {
@@ -1555,8 +1576,7 @@ static void perf_session__warn_about_errors(const struct perf_session *session)
                            stats->nr_unprocessable_samples);
        }
 
-       if (oe->nr_unordered_events != 0)
-               ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
+       perf_session__warn_order(session);
 
        events_stats__auxtrace_error_warn(stats);
 
@@ -1868,7 +1888,7 @@ bool perf_session__has_traces(struct perf_session *session, const char *msg)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(session->evlist, evsel) {
+       evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
                        return true;
        }
@@ -1950,7 +1970,7 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
 {
        struct perf_evsel *pos;
 
-       evlist__for_each(session->evlist, pos) {
+       evlist__for_each_entry(session->evlist, pos) {
                if (pos->attr.type == type)
                        return pos;
        }
@@ -2105,7 +2125,7 @@ int perf_event__synthesize_id_index(struct perf_tool *tool,
        max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
                 sizeof(struct id_index_entry);
 
-       evlist__for_each(evlist, evsel)
+       evlist__for_each_entry(evlist, evsel)
                nr += evsel->ids;
 
        n = nr > max_nr ? max_nr : nr;
@@ -2118,7 +2138,7 @@ int perf_event__synthesize_id_index(struct perf_tool *tool,
        ev->id_index.header.size = sz;
        ev->id_index.nr = n;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                u32 j;
 
                for (j = 0; j < evsel->ids; j++) {
index c4e9bd70723c5b26df4499af5d32afdb86109c31..947d21f3839838c433430b01fe52165522f87295 100644 (file)
@@ -79,8 +79,8 @@ static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
 {
        const char *comm = thread__comm_str(he->thread);
 
-       width = max(7U, width) - 6;
-       return repsep_snprintf(bf, size, "%5d:%-*.*s", he->thread->tid,
+       width = max(7U, width) - 8;
+       return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
                               width, width, comm ?: "");
 }
 
@@ -95,7 +95,7 @@ static int hist_entry__thread_filter(struct hist_entry *he, int type, const void
 }
 
 struct sort_entry sort_thread = {
-       .se_header      = "  Pid:Command",
+       .se_header      = "    Pid:Command",
        .se_cmp         = sort__thread_cmp,
        .se_snprintf    = hist_entry__thread_snprintf,
        .se_filter      = hist_entry__thread_filter,
@@ -1218,7 +1218,7 @@ struct sort_entry sort_mem_daddr_dso = {
        .se_header      = "Data Object",
        .se_cmp         = sort__dso_daddr_cmp,
        .se_snprintf    = hist_entry__dso_daddr_snprintf,
-       .se_width_idx   = HISTC_MEM_DADDR_SYMBOL,
+       .se_width_idx   = HISTC_MEM_DADDR_DSO,
 };
 
 struct sort_entry sort_mem_locked = {
@@ -1488,7 +1488,7 @@ void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
 }
 
 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
-                             struct perf_evsel *evsel)
+                             struct hists *hists)
 {
        struct hpp_sort_entry *hse;
        size_t len = fmt->user_len;
@@ -1496,14 +1496,14 @@ static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
        hse = container_of(fmt, struct hpp_sort_entry, hpp);
 
        if (!len)
-               len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
+               len = hists__col_len(hists, hse->se->se_width_idx);
 
        return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
 }
 
 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
                             struct perf_hpp *hpp __maybe_unused,
-                            struct perf_evsel *evsel)
+                            struct hists *hists)
 {
        struct hpp_sort_entry *hse;
        size_t len = fmt->user_len;
@@ -1511,7 +1511,7 @@ static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
        hse = container_of(fmt, struct hpp_sort_entry, hpp);
 
        if (!len)
-               len = hists__col_len(evsel__hists(evsel), hse->se->se_width_idx);
+               len = hists__col_len(hists, hse->se->se_width_idx);
 
        return len;
 }
@@ -1793,7 +1793,7 @@ static void update_dynamic_len(struct hpp_dynamic_entry *hde,
 }
 
 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
-                             struct perf_evsel *evsel __maybe_unused)
+                             struct hists *hists __maybe_unused)
 {
        struct hpp_dynamic_entry *hde;
        size_t len = fmt->user_len;
@@ -1808,7 +1808,7 @@ static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
 
 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
                             struct perf_hpp *hpp __maybe_unused,
-                            struct perf_evsel *evsel __maybe_unused)
+                            struct hists *hists __maybe_unused)
 {
        struct hpp_dynamic_entry *hde;
        size_t len = fmt->user_len;
@@ -2069,7 +2069,7 @@ static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_nam
        }
 
        full_name = !!strchr(event_name, ':');
-       evlist__for_each(evlist, pos) {
+       evlist__for_each_entry(evlist, pos) {
                /* case 2 */
                if (full_name && !strcmp(pos->name, event_name))
                        return pos;
@@ -2125,7 +2125,7 @@ static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
        int ret;
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
                        continue;
 
@@ -2143,7 +2143,7 @@ static int add_all_matching_fields(struct perf_evlist *evlist,
        struct perf_evsel *evsel;
        struct format_field *field;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
                        continue;
 
@@ -2381,6 +2381,9 @@ static int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
                if (sort__mode != SORT_MODE__MEMORY)
                        return -EINVAL;
 
+               if (sd->entry == &sort_mem_dcacheline && cacheline_size == 0)
+                       return -EINVAL;
+
                if (sd->entry == &sort_mem_daddr_sym)
                        list->sym = 1;
 
@@ -2424,7 +2427,10 @@ static int setup_sort_list(struct perf_hpp_list *list, char *str,
                if (*tok) {
                        ret = sort_dimension__add(list, tok, evlist, level);
                        if (ret == -EINVAL) {
-                               error("Invalid --sort key: `%s'", tok);
+                               if (!cacheline_size && !strncasecmp(tok, "dcacheline", strlen(tok)))
+                                       error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
+                               else
+                                       error("Invalid --sort key: `%s'", tok);
                                break;
                        } else if (ret == -ESRCH) {
                                error("Unknown --sort key: `%s'", tok);
@@ -2456,7 +2462,7 @@ static const char *get_default_sort_order(struct perf_evlist *evlist)
        if (evlist == NULL)
                goto out_no_evlist;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (evsel->attr.type != PERF_TYPE_TRACEPOINT) {
                        use_trace = false;
                        break;
index ebb59cacd092fa919e45a447a6e3e3ad70a0c880..7ca37ea1739559e1e3f064c9260d5d078cb5b84f 100644 (file)
@@ -67,6 +67,11 @@ struct hist_entry_diff {
        };
 };
 
+struct hist_entry_ops {
+       void    *(*new)(size_t size);
+       void    (*free)(void *ptr);
+};
+
 /**
  * struct hist_entry - histogram entry
  *
@@ -125,6 +130,7 @@ struct hist_entry {
        void                    *trace_output;
        struct perf_hpp_list    *hpp_list;
        struct hist_entry       *parent_he;
+       struct hist_entry_ops   *ops;
        union {
                /* this is for hierarchical entry structure */
                struct {
index aa9efe08762b9620c9d477384e524de81f8b137a..8a2bbd2a4d828c83deb2f203d25d5e452752c90b 100644 (file)
@@ -36,6 +36,11 @@ static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
 static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
 static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
 static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_topdown_total_slots[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_topdown_slots_issued[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_topdown_slots_retired[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_topdown_fetch_bubbles[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_topdown_recovery_bubbles[NUM_CTX][MAX_NR_CPUS];
 static bool have_frontend_stalled;
 
 struct stats walltime_nsecs_stats;
@@ -82,6 +87,11 @@ void perf_stat__reset_shadow_stats(void)
                sizeof(runtime_transaction_stats));
        memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
        memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
+       memset(runtime_topdown_total_slots, 0, sizeof(runtime_topdown_total_slots));
+       memset(runtime_topdown_slots_retired, 0, sizeof(runtime_topdown_slots_retired));
+       memset(runtime_topdown_slots_issued, 0, sizeof(runtime_topdown_slots_issued));
+       memset(runtime_topdown_fetch_bubbles, 0, sizeof(runtime_topdown_fetch_bubbles));
+       memset(runtime_topdown_recovery_bubbles, 0, sizeof(runtime_topdown_recovery_bubbles));
 }
 
 /*
@@ -105,6 +115,16 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
                update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
        else if (perf_stat_evsel__is(counter, ELISION_START))
                update_stats(&runtime_elision_stats[ctx][cpu], count[0]);
+       else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
+               update_stats(&runtime_topdown_total_slots[ctx][cpu], count[0]);
+       else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
+               update_stats(&runtime_topdown_slots_issued[ctx][cpu], count[0]);
+       else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
+               update_stats(&runtime_topdown_slots_retired[ctx][cpu], count[0]);
+       else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
+               update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu],count[0]);
+       else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
+               update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
                update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]);
        else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
@@ -302,6 +322,107 @@ static void print_ll_cache_misses(int cpu,
        out->print_metric(out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
 }
 
+/*
+ * High level "TopDown" CPU core pipe line bottleneck break down.
+ *
+ * Basic concept following
+ * Yasin, A Top Down Method for Performance analysis and Counter architecture
+ * ISPASS14
+ *
+ * The CPU pipeline is divided into 4 areas that can be bottlenecks:
+ *
+ * Frontend -> Backend -> Retiring
+ * BadSpeculation in addition means out of order execution that is thrown away
+ * (for example branch mispredictions)
+ * Frontend is instruction decoding.
+ * Backend is execution, like computation and accessing data in memory
+ * Retiring is good execution that is not directly bottlenecked
+ *
+ * The formulas are computed in slots.
+ * A slot is an entry in the pipeline each for the pipeline width
+ * (for example a 4-wide pipeline has 4 slots for each cycle)
+ *
+ * Formulas:
+ * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
+ *                     TotalSlots
+ * Retiring = SlotsRetired / TotalSlots
+ * FrontendBound = FetchBubbles / TotalSlots
+ * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
+ *
+ * The kernel provides the mapping to the low level CPU events and any scaling
+ * needed for the CPU pipeline width, for example:
+ *
+ * TotalSlots = Cycles * 4
+ *
+ * The scaling factor is communicated in the sysfs unit.
+ *
+ * In some cases the CPU may not be able to measure all the formulas due to
+ * missing events. In this case multiple formulas are combined, as possible.
+ *
+ * Full TopDown supports more levels to sub-divide each area: for example
+ * BackendBound into computing bound and memory bound. For now we only
+ * support Level 1 TopDown.
+ */
+
+static double sanitize_val(double x)
+{
+       if (x < 0 && x >= -0.02)
+               return 0.0;
+       return x;
+}
+
+static double td_total_slots(int ctx, int cpu)
+{
+       return avg_stats(&runtime_topdown_total_slots[ctx][cpu]);
+}
+
+static double td_bad_spec(int ctx, int cpu)
+{
+       double bad_spec = 0;
+       double total_slots;
+       double total;
+
+       total = avg_stats(&runtime_topdown_slots_issued[ctx][cpu]) -
+               avg_stats(&runtime_topdown_slots_retired[ctx][cpu]) +
+               avg_stats(&runtime_topdown_recovery_bubbles[ctx][cpu]);
+       total_slots = td_total_slots(ctx, cpu);
+       if (total_slots)
+               bad_spec = total / total_slots;
+       return sanitize_val(bad_spec);
+}
+
+static double td_retiring(int ctx, int cpu)
+{
+       double retiring = 0;
+       double total_slots = td_total_slots(ctx, cpu);
+       double ret_slots = avg_stats(&runtime_topdown_slots_retired[ctx][cpu]);
+
+       if (total_slots)
+               retiring = ret_slots / total_slots;
+       return retiring;
+}
+
+static double td_fe_bound(int ctx, int cpu)
+{
+       double fe_bound = 0;
+       double total_slots = td_total_slots(ctx, cpu);
+       double fetch_bub = avg_stats(&runtime_topdown_fetch_bubbles[ctx][cpu]);
+
+       if (total_slots)
+               fe_bound = fetch_bub / total_slots;
+       return fe_bound;
+}
+
+static double td_be_bound(int ctx, int cpu)
+{
+       double sum = (td_fe_bound(ctx, cpu) +
+                     td_bad_spec(ctx, cpu) +
+                     td_retiring(ctx, cpu));
+       if (sum == 0)
+               return 0;
+       return sanitize_val(1.0 - sum);
+}
+
 void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
                                   double avg, int cpu,
                                   struct perf_stat_output_ctx *out)
@@ -309,6 +430,7 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
        void *ctxp = out->ctx;
        print_metric_t print_metric = out->print_metric;
        double total, ratio = 0.0, total2;
+       const char *color = NULL;
        int ctx = evsel_context(evsel);
 
        if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
@@ -452,6 +574,46 @@ void perf_stat__print_shadow_stats(struct perf_evsel *evsel,
                                     avg / ratio);
                else
                        print_metric(ctxp, NULL, NULL, "CPUs utilized", 0);
+       } else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
+               double fe_bound = td_fe_bound(ctx, cpu);
+
+               if (fe_bound > 0.2)
+                       color = PERF_COLOR_RED;
+               print_metric(ctxp, color, "%8.1f%%", "frontend bound",
+                               fe_bound * 100.);
+       } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
+               double retiring = td_retiring(ctx, cpu);
+
+               if (retiring > 0.7)
+                       color = PERF_COLOR_GREEN;
+               print_metric(ctxp, color, "%8.1f%%", "retiring",
+                               retiring * 100.);
+       } else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
+               double bad_spec = td_bad_spec(ctx, cpu);
+
+               if (bad_spec > 0.1)
+                       color = PERF_COLOR_RED;
+               print_metric(ctxp, color, "%8.1f%%", "bad speculation",
+                               bad_spec * 100.);
+       } else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
+               double be_bound = td_be_bound(ctx, cpu);
+               const char *name = "backend bound";
+               static int have_recovery_bubbles = -1;
+
+               /* In case the CPU does not support topdown-recovery-bubbles */
+               if (have_recovery_bubbles < 0)
+                       have_recovery_bubbles = pmu_have_event("cpu",
+                                       "topdown-recovery-bubbles");
+               if (!have_recovery_bubbles)
+                       name = "backend bound/bad spec";
+
+               if (be_bound > 0.2)
+                       color = PERF_COLOR_RED;
+               if (td_total_slots(ctx, cpu) > 0)
+                       print_metric(ctxp, color, "%8.1f%%", name,
+                                       be_bound * 100.);
+               else
+                       print_metric(ctxp, NULL, NULL, name, 0);
        } else if (runtime_nsecs_stats[cpu].n != 0) {
                char unit = 'M';
                char unit_buf[10];
index ffa1d0653861617bcce62b8781e5bfce6e7e5c94..39345c2ddfc22edcfde844e5eb95b72930f4eeda 100644 (file)
@@ -79,6 +79,11 @@ static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
        ID(TRANSACTION_START,   cpu/tx-start/),
        ID(ELISION_START,       cpu/el-start/),
        ID(CYCLES_IN_TX_CP,     cpu/cycles-ct/),
+       ID(TOPDOWN_TOTAL_SLOTS, topdown-total-slots),
+       ID(TOPDOWN_SLOTS_ISSUED, topdown-slots-issued),
+       ID(TOPDOWN_SLOTS_RETIRED, topdown-slots-retired),
+       ID(TOPDOWN_FETCH_BUBBLES, topdown-fetch-bubbles),
+       ID(TOPDOWN_RECOVERY_BUBBLES, topdown-recovery-bubbles),
 };
 #undef ID
 
@@ -157,7 +162,7 @@ int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                if (perf_evsel__alloc_stats(evsel, alloc_raw))
                        goto out_free;
        }
@@ -173,7 +178,7 @@ void perf_evlist__free_stats(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                perf_evsel__free_stat_priv(evsel);
                perf_evsel__free_counts(evsel);
                perf_evsel__free_prev_raw_counts(evsel);
@@ -184,7 +189,7 @@ void perf_evlist__reset_stats(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
 
-       evlist__for_each(evlist, evsel) {
+       evlist__for_each_entry(evlist, evsel) {
                perf_evsel__reset_stat_priv(evsel);
                perf_evsel__reset_counts(evsel);
        }
index 0150e786ccc7c1f48e407323b606f47155a076c9..c29bb94c48a4b6070a659e3674f4ae84694724f9 100644 (file)
@@ -17,6 +17,11 @@ enum perf_stat_evsel_id {
        PERF_STAT_EVSEL_ID__TRANSACTION_START,
        PERF_STAT_EVSEL_ID__ELISION_START,
        PERF_STAT_EVSEL_ID__CYCLES_IN_TX_CP,
+       PERF_STAT_EVSEL_ID__TOPDOWN_TOTAL_SLOTS,
+       PERF_STAT_EVSEL_ID__TOPDOWN_SLOTS_ISSUED,
+       PERF_STAT_EVSEL_ID__TOPDOWN_SLOTS_RETIRED,
+       PERF_STAT_EVSEL_ID__TOPDOWN_FETCH_BUBBLES,
+       PERF_STAT_EVSEL_ID__TOPDOWN_RECOVERY_BUBBLES,
        PERF_STAT_EVSEL_ID__MAX,
 };
 
index f95f682aa2b2c3fd04b20e96e652e5696895a4b4..817593908d47a3d1ea27eb73080b94852e88d990 100644 (file)
@@ -1,5 +1,5 @@
 #include "debug.h"
-#include "cache.h"
+#include "util.h"
 #include <linux/kernel.h>
 
 int prefixcmp(const char *str, const char *prefix)
index 54b409297d4a0cfe7e82e3e90041dfeee4311490..b268a6648a5d6ae0733a0a87a73ffaf2ae50f4f1 100644 (file)
@@ -40,6 +40,9 @@
 
 #include <assert.h>
 #include <stdarg.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/types.h>
 
 extern char strbuf_slopbuf[];
 struct strbuf {
index ca990029e2430a5681d56682ae6176d66fc3ee0a..19207e50fce50a0bf7d01f11bbc87b6b64ecba6a 100644 (file)
@@ -73,7 +73,7 @@ static inline struct str_node *strlist__next(struct str_node *sn)
  * @pos:       the &struct str_node to use as a loop cursor.
  * @slist:     the &struct strlist for loop.
  */
-#define strlist__for_each(pos, slist)  \
+#define strlist__for_each_entry(pos, slist)    \
        for (pos = strlist__first(slist); pos; pos = strlist__next(pos))
 
 /**
@@ -83,7 +83,7 @@ static inline struct str_node *strlist__next(struct str_node *sn)
  * @n:         another &struct str_node to use as temporary storage.
  * @slist:     the &struct strlist for loop.
  */
-#define strlist__for_each_safe(pos, n, slist)  \
+#define strlist__for_each_entry_safe(pos, n, slist)    \
        for (pos = strlist__first(slist), n = strlist__next(pos); pos;\
             pos = n, n = strlist__next(n))
 #endif /* __PERF_STRLIST_H */
index 87a297dd89016e13a829d4a7397c74e0892eae30..a34321e9b44d8a42c1f4839b844b15e196a6417b 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "symbol.h"
 #include "demangle-java.h"
+#include "demangle-rust.h"
 #include "machine.h"
 #include "vdso.h"
 #include <symbol/kallsyms.h>
@@ -16,6 +17,7 @@
 #define EM_AARCH64     183  /* ARM 64 bit */
 #endif
 
+typedef Elf64_Nhdr GElf_Nhdr;
 
 #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
 extern char *cplus_demangle(const char *, int);
@@ -54,6 +56,14 @@ static int elf_getphdrnum(Elf *elf, size_t *dst)
 }
 #endif
 
+#ifndef HAVE_ELF_GETSHDRSTRNDX_SUPPORT
+static int elf_getshdrstrndx(Elf *elf __maybe_unused, size_t *dst __maybe_unused)
+{
+       pr_err("%s: update your libelf to > 0.140, this one lacks elf_getshdrstrndx().\n", __func__);
+       return -1;
+}
+#endif
+
 #ifndef NT_GNU_BUILD_ID
 #define NT_GNU_BUILD_ID 3
 #endif
@@ -1072,6 +1082,13 @@ new_symbol:
                        demangled = bfd_demangle(NULL, elf_name, demangle_flags);
                        if (demangled == NULL)
                                demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
+                       else if (rust_is_mangled(demangled))
+                               /*
+                                * Input to Rust demangling is the BFD-demangled
+                                * name which it Rust-demangles in place.
+                                */
+                               rust_demangle_sym(demangled);
+
                        if (demangled != NULL)
                                elf_name = demangled;
                }
@@ -1781,6 +1798,260 @@ void kcore_extract__delete(struct kcore_extract *kce)
        unlink(kce->extract_filename);
 }
 
+#ifdef HAVE_GELF_GETNOTE_SUPPORT
+/**
+ * populate_sdt_note : Parse raw data and identify SDT note
+ * @elf: elf of the opened file
+ * @data: raw data of a section with description offset applied
+ * @len: note description size
+ * @type: type of the note
+ * @sdt_notes: List to add the SDT note
+ *
+ * Responsible for parsing the @data in section .note.stapsdt in @elf and
+ * if it's an SDT note, it appends to @sdt_notes list.
+ */
+static int populate_sdt_note(Elf **elf, const char *data, size_t len,
+                            struct list_head *sdt_notes)
+{
+       const char *provider, *name;
+       struct sdt_note *tmp = NULL;
+       GElf_Ehdr ehdr;
+       GElf_Addr base_off = 0;
+       GElf_Shdr shdr;
+       int ret = -EINVAL;
+
+       union {
+               Elf64_Addr a64[NR_ADDR];
+               Elf32_Addr a32[NR_ADDR];
+       } buf;
+
+       Elf_Data dst = {
+               .d_buf = &buf, .d_type = ELF_T_ADDR, .d_version = EV_CURRENT,
+               .d_size = gelf_fsize((*elf), ELF_T_ADDR, NR_ADDR, EV_CURRENT),
+               .d_off = 0, .d_align = 0
+       };
+       Elf_Data src = {
+               .d_buf = (void *) data, .d_type = ELF_T_ADDR,
+               .d_version = EV_CURRENT, .d_size = dst.d_size, .d_off = 0,
+               .d_align = 0
+       };
+
+       tmp = (struct sdt_note *)calloc(1, sizeof(struct sdt_note));
+       if (!tmp) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       INIT_LIST_HEAD(&tmp->note_list);
+
+       if (len < dst.d_size + 3)
+               goto out_free_note;
+
+       /* Translation from file representation to memory representation */
+       if (gelf_xlatetom(*elf, &dst, &src,
+                         elf_getident(*elf, NULL)[EI_DATA]) == NULL) {
+               pr_err("gelf_xlatetom : %s\n", elf_errmsg(-1));
+               goto out_free_note;
+       }
+
+       /* Populate the fields of sdt_note */
+       provider = data + dst.d_size;
+
+       name = (const char *)memchr(provider, '\0', data + len - provider);
+       if (name++ == NULL)
+               goto out_free_note;
+
+       tmp->provider = strdup(provider);
+       if (!tmp->provider) {
+               ret = -ENOMEM;
+               goto out_free_note;
+       }
+       tmp->name = strdup(name);
+       if (!tmp->name) {
+               ret = -ENOMEM;
+               goto out_free_prov;
+       }
+
+       if (gelf_getclass(*elf) == ELFCLASS32) {
+               memcpy(&tmp->addr, &buf, 3 * sizeof(Elf32_Addr));
+               tmp->bit32 = true;
+       } else {
+               memcpy(&tmp->addr, &buf, 3 * sizeof(Elf64_Addr));
+               tmp->bit32 = false;
+       }
+
+       if (!gelf_getehdr(*elf, &ehdr)) {
+               pr_debug("%s : cannot get elf header.\n", __func__);
+               ret = -EBADF;
+               goto out_free_name;
+       }
+
+       /* Adjust the prelink effect :
+        * Find out the .stapsdt.base section.
+        * This scn will help us to handle prelinking (if present).
+        * Compare the retrieved file offset of the base section with the
+        * base address in the description of the SDT note. If it's different,
+        * then accordingly, adjust the note location.
+        */
+       if (elf_section_by_name(*elf, &ehdr, &shdr, SDT_BASE_SCN, NULL)) {
+               base_off = shdr.sh_offset;
+               if (base_off) {
+                       if (tmp->bit32)
+                               tmp->addr.a32[0] = tmp->addr.a32[0] + base_off -
+                                       tmp->addr.a32[1];
+                       else
+                               tmp->addr.a64[0] = tmp->addr.a64[0] + base_off -
+                                       tmp->addr.a64[1];
+               }
+       }
+
+       list_add_tail(&tmp->note_list, sdt_notes);
+       return 0;
+
+out_free_name:
+       free(tmp->name);
+out_free_prov:
+       free(tmp->provider);
+out_free_note:
+       free(tmp);
+out_err:
+       return ret;
+}
+
+/**
+ * construct_sdt_notes_list : constructs a list of SDT notes
+ * @elf : elf to look into
+ * @sdt_notes : empty list_head
+ *
+ * Scans the sections in 'elf' for the section
+ * .note.stapsdt. It then calls populate_sdt_note to find
+ * out the SDT events and populates the 'sdt_notes'.
+ */
+static int construct_sdt_notes_list(Elf *elf, struct list_head *sdt_notes)
+{
+       GElf_Ehdr ehdr;
+       Elf_Scn *scn = NULL;
+       Elf_Data *data;
+       GElf_Shdr shdr;
+       size_t shstrndx, next;
+       GElf_Nhdr nhdr;
+       size_t name_off, desc_off, offset;
+       int ret = 0;
+
+       if (gelf_getehdr(elf, &ehdr) == NULL) {
+               ret = -EBADF;
+               goto out_ret;
+       }
+       if (elf_getshdrstrndx(elf, &shstrndx) != 0) {
+               ret = -EBADF;
+               goto out_ret;
+       }
+
+       /* Look for the required section */
+       scn = elf_section_by_name(elf, &ehdr, &shdr, SDT_NOTE_SCN, NULL);
+       if (!scn) {
+               ret = -ENOENT;
+               goto out_ret;
+       }
+
+       if ((shdr.sh_type != SHT_NOTE) || (shdr.sh_flags & SHF_ALLOC)) {
+               ret = -ENOENT;
+               goto out_ret;
+       }
+
+       data = elf_getdata(scn, NULL);
+
+       /* Get the SDT notes */
+       for (offset = 0; (next = gelf_getnote(data, offset, &nhdr, &name_off,
+                                             &desc_off)) > 0; offset = next) {
+               if (nhdr.n_namesz == sizeof(SDT_NOTE_NAME) &&
+                   !memcmp(data->d_buf + name_off, SDT_NOTE_NAME,
+                           sizeof(SDT_NOTE_NAME))) {
+                       /* Check the type of the note */
+                       if (nhdr.n_type != SDT_NOTE_TYPE)
+                               goto out_ret;
+
+                       ret = populate_sdt_note(&elf, ((data->d_buf) + desc_off),
+                                               nhdr.n_descsz, sdt_notes);
+                       if (ret < 0)
+                               goto out_ret;
+               }
+       }
+       if (list_empty(sdt_notes))
+               ret = -ENOENT;
+
+out_ret:
+       return ret;
+}
+
+/**
+ * get_sdt_note_list : Wrapper to construct a list of sdt notes
+ * @head : empty list_head
+ * @target : file to find SDT notes from
+ *
+ * This opens the file, initializes
+ * the ELF and then calls construct_sdt_notes_list.
+ */
+int get_sdt_note_list(struct list_head *head, const char *target)
+{
+       Elf *elf;
+       int fd, ret;
+
+       fd = open(target, O_RDONLY);
+       if (fd < 0)
+               return -EBADF;
+
+       elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+       if (!elf) {
+               ret = -EBADF;
+               goto out_close;
+       }
+       ret = construct_sdt_notes_list(elf, head);
+       elf_end(elf);
+out_close:
+       close(fd);
+       return ret;
+}
+
+/**
+ * cleanup_sdt_note_list : free the sdt notes' list
+ * @sdt_notes: sdt notes' list
+ *
+ * Free up the SDT notes in @sdt_notes.
+ * Returns the number of SDT notes free'd.
+ */
+int cleanup_sdt_note_list(struct list_head *sdt_notes)
+{
+       struct sdt_note *tmp, *pos;
+       int nr_free = 0;
+
+       list_for_each_entry_safe(pos, tmp, sdt_notes, note_list) {
+               list_del(&pos->note_list);
+               free(pos->name);
+               free(pos->provider);
+               free(pos);
+               nr_free++;
+       }
+       return nr_free;
+}
+
+/**
+ * sdt_notes__get_count: Counts the number of sdt events
+ * @start: list_head to sdt_notes list
+ *
+ * Returns the number of SDT notes in a list
+ */
+int sdt_notes__get_count(struct list_head *start)
+{
+       struct sdt_note *sdt_ptr;
+       int count = 0;
+
+       list_for_each_entry(sdt_ptr, start, note_list)
+               count++;
+       return count;
+}
+#endif
+
 void symbol__elf_init(void)
 {
        elf_version(EV_CURRENT);
index 54c4ff2b1cee298335a9cdecfd3d07a43f51d241..37e8d20ae03e29ef2a9fbc48e048858a4f820785 100644 (file)
@@ -1430,7 +1430,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
         * Read the build id if possible. This is required for
         * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
         */
-       if (is_regular_file(name) &&
+       if (is_regular_file(dso->long_name) &&
            filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
                dso__set_build_id(dso, build_id);
 
@@ -1626,7 +1626,7 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
        if (!dirs)
                return -1;
 
-       strlist__for_each(nd, dirs) {
+       strlist__for_each_entry(nd, dirs) {
                scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
                          "%s/%s/kallsyms", dir, nd->s);
                if (!validate_kcore_addresses(kallsyms_filename, map)) {
@@ -1641,6 +1641,20 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
        return ret;
 }
 
+/*
+ * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
+ * since access(R_OK) only checks with real UID/GID but open() uses effective
+ * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
+ */
+static bool filename__readable(const char *file)
+{
+       int fd = open(file, O_RDONLY);
+       if (fd < 0)
+               return false;
+       close(fd);
+       return true;
+}
+
 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
 {
        u8 host_build_id[BUILD_ID_SIZE];
@@ -1660,58 +1674,43 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
                                 sizeof(host_build_id)) == 0)
                is_host = dso__build_id_equal(dso, host_build_id);
 
-       build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
-
-       scnprintf(path, sizeof(path), "%s/%s/%s", buildid_dir,
-                 DSO__NAME_KCORE, sbuild_id);
-
-       /* Use /proc/kallsyms if possible */
+       /* Try a fast path for /proc/kallsyms if possible */
        if (is_host) {
-               DIR *d;
-               int fd;
-
-               /* If no cached kcore go with /proc/kallsyms */
-               d = opendir(path);
-               if (!d)
-                       goto proc_kallsyms;
-               closedir(d);
-
                /*
-                * Do not check the build-id cache, until we know we cannot use
-                * /proc/kcore.
+                * Do not check the build-id cache, unless we know we cannot use
+                * /proc/kcore or module maps don't match to /proc/kallsyms.
+                * To check readability of /proc/kcore, do not use access(R_OK)
+                * since /proc/kcore requires CAP_SYS_RAWIO to read and access
+                * can't check it.
                 */
-               fd = open("/proc/kcore", O_RDONLY);
-               if (fd != -1) {
-                       close(fd);
-                       /* If module maps match go with /proc/kallsyms */
-                       if (!validate_kcore_addresses("/proc/kallsyms", map))
-                               goto proc_kallsyms;
-               }
-
-               /* Find kallsyms in build-id cache with kcore */
-               if (!find_matching_kcore(map, path, sizeof(path)))
-                       return strdup(path);
-
-               goto proc_kallsyms;
+               if (filename__readable("/proc/kcore") &&
+                   !validate_kcore_addresses("/proc/kallsyms", map))
+                       goto proc_kallsyms;
        }
 
+       build_id__sprintf(dso->build_id, sizeof(dso->build_id), sbuild_id);
+
        /* Find kallsyms in build-id cache with kcore */
+       scnprintf(path, sizeof(path), "%s/%s/%s",
+                 buildid_dir, DSO__NAME_KCORE, sbuild_id);
+
        if (!find_matching_kcore(map, path, sizeof(path)))
                return strdup(path);
 
-       scnprintf(path, sizeof(path), "%s/%s/%s",
-                 buildid_dir, DSO__NAME_KALLSYMS, sbuild_id);
+       /* Use current /proc/kallsyms if possible */
+       if (is_host) {
+proc_kallsyms:
+               return strdup("/proc/kallsyms");
+       }
 
-       if (access(path, F_OK)) {
+       /* Finally, find a cache of kallsyms */
+       if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
                pr_err("No kallsyms or vmlinux with build-id %s was found\n",
                       sbuild_id);
                return NULL;
        }
 
        return strdup(path);
-
-proc_kallsyms:
-       return strdup("/proc/kallsyms");
 }
 
 static int dso__load_kernel_sym(struct dso *dso, struct map *map,
index b10d558a88032919c6249aa2417477022c3d71e2..699f7cbcfe720b3ac9ed4fbf0e110db1136aae35 100644 (file)
@@ -342,4 +342,26 @@ void arch__sym_update(struct symbol *s, GElf_Sym *sym);
 
 int arch__choose_best_symbol(struct symbol *syma, struct symbol *symb);
 
+/* structure containing an SDT note's info */
+struct sdt_note {
+       char *name;                     /* name of the note*/
+       char *provider;                 /* provider name */
+       bool bit32;                     /* whether the location is 32 bits? */
+       union {                         /* location, base and semaphore addrs */
+               Elf64_Addr a64[3];
+               Elf32_Addr a32[3];
+       } addr;
+       struct list_head note_list;     /* SDT notes' list */
+};
+
+int get_sdt_note_list(struct list_head *head, const char *target);
+int cleanup_sdt_note_list(struct list_head *sdt_notes);
+int sdt_notes__get_count(struct list_head *start);
+
+#define SDT_BASE_SCN ".stapsdt.base"
+#define SDT_NOTE_SCN  ".note.stapsdt"
+#define SDT_NOTE_TYPE 3
+#define SDT_NOTE_NAME "stapsdt"
+#define NR_ADDR 3
+
 #endif /* __PERF_SYMBOL */
index a53603b27e525864eeb328e934ba696dc3ea7e6e..8cdcf4641c51de6a1f5a4df597280be97833c07e 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include "target.h"
+#include "util.h"
 #include "debug.h"
 
 #include <pwd.h>
@@ -121,7 +122,7 @@ int target__strerror(struct target *target, int errnum,
        BUG_ON(buflen == 0);
 
        if (errnum >= 0) {
-               const char *err = strerror_r(errnum, buf, buflen);
+               const char *err = str_error_r(errnum, buf, buflen);
 
                if (err != buf)
                        scnprintf(buf, buflen, "%s", err);
index 825086aa9a08685303c08cddb36823620fa65c9e..d3301529f6a762a17e5e390f8fd53286b5fc6145 100644 (file)
@@ -616,3 +616,10 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 
        return err;
 }
+
+size_t thread_stack__depth(struct thread *thread)
+{
+       if (!thread->ts)
+               return 0;
+       return thread->ts->cnt;
+}
index ad44c7944b8e72bbdbd931983cf972d3a598799e..b7e41c4ebfdd98ec038f823e70051a49c3fa6a62 100644 (file)
@@ -87,6 +87,7 @@ void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
                          size_t sz, u64 ip);
 int thread_stack__flush(struct thread *thread);
 void thread_stack__free(struct thread *thread);
+size_t thread_stack__depth(struct thread *thread);
 
 struct call_return_processor *
 call_return_processor__new(int (*process)(struct call_return *cr, void *data),
index 45fcb715a36b3f6a975600d41eb877b387b78417..8b10a55410a258980e1e40216540db3881bcd379 100644 (file)
@@ -43,9 +43,6 @@ struct thread *thread__new(pid_t pid, pid_t tid)
                thread->cpu = -1;
                INIT_LIST_HEAD(&thread->comm_list);
 
-               if (unwind__prepare_access(thread) < 0)
-                       goto err_thread;
-
                comm_str = malloc(32);
                if (!comm_str)
                        goto err_thread;
@@ -201,10 +198,51 @@ size_t thread__fprintf(struct thread *thread, FILE *fp)
               map_groups__fprintf(thread->mg, fp);
 }
 
-void thread__insert_map(struct thread *thread, struct map *map)
+int thread__insert_map(struct thread *thread, struct map *map)
 {
+       int ret;
+
+       ret = unwind__prepare_access(thread, map, NULL);
+       if (ret)
+               return ret;
+
        map_groups__fixup_overlappings(thread->mg, map, stderr);
        map_groups__insert(thread->mg, map);
+
+       return 0;
+}
+
+static int __thread__prepare_access(struct thread *thread)
+{
+       bool initialized = false;
+       int i, err = 0;
+
+       for (i = 0; i < MAP__NR_TYPES; ++i) {
+               struct maps *maps = &thread->mg->maps[i];
+               struct map *map;
+
+               pthread_rwlock_rdlock(&maps->lock);
+
+               for (map = maps__first(maps); map; map = map__next(map)) {
+                       err = unwind__prepare_access(thread, map, &initialized);
+                       if (err || initialized)
+                               break;
+               }
+
+               pthread_rwlock_unlock(&maps->lock);
+       }
+
+       return err;
+}
+
+static int thread__prepare_access(struct thread *thread)
+{
+       int err = 0;
+
+       if (symbol_conf.use_callchain)
+               err = __thread__prepare_access(thread);
+
+       return err;
 }
 
 static int thread__clone_map_groups(struct thread *thread,
@@ -214,7 +252,7 @@ static int thread__clone_map_groups(struct thread *thread,
 
        /* This is new thread, we share map groups for process. */
        if (thread->pid_ == parent->pid_)
-               return 0;
+               return thread__prepare_access(thread);
 
        if (thread->mg == parent->mg) {
                pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
@@ -224,7 +262,7 @@ static int thread__clone_map_groups(struct thread *thread,
 
        /* But this one is new process, copy maps. */
        for (i = 0; i < MAP__NR_TYPES; ++i)
-               if (map_groups__clone(thread->mg, parent->mg, i) < 0)
+               if (map_groups__clone(thread, parent->mg, i) < 0)
                        return -ENOMEM;
 
        return 0;
@@ -265,3 +303,14 @@ void thread__find_cpumode_addr_location(struct thread *thread,
                        break;
        }
 }
+
+struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
+{
+       if (thread->pid_ == thread->tid)
+               return thread__get(thread);
+
+       if (thread->pid_ == -1)
+               return NULL;
+
+       return machine__find_thread(machine, thread->pid_, thread->pid_);
+}
index 45fba13c800bd36150248d4ade2b9f0b4b053329..99263cb6e6b646bd987fc3ff410c2efa495f3944 100644 (file)
@@ -9,11 +9,9 @@
 #include "symbol.h"
 #include <strlist.h>
 #include <intlist.h>
-#ifdef HAVE_LIBUNWIND_SUPPORT
-#include <libunwind.h>
-#endif
 
 struct thread_stack;
+struct unwind_libunwind_ops;
 
 struct thread {
        union {
@@ -36,7 +34,8 @@ struct thread {
        void                    *priv;
        struct thread_stack     *ts;
 #ifdef HAVE_LIBUNWIND_SUPPORT
-       unw_addr_space_t        addr_space;
+       void                            *addr_space;
+       struct unwind_libunwind_ops     *unwind_libunwind_ops;
 #endif
 };
 
@@ -77,10 +76,12 @@ int thread__comm_len(struct thread *thread);
 struct comm *thread__comm(const struct thread *thread);
 struct comm *thread__exec_comm(const struct thread *thread);
 const char *thread__comm_str(const struct thread *thread);
-void thread__insert_map(struct thread *thread, struct map *map);
+int thread__insert_map(struct thread *thread, struct map *map);
 int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp);
 size_t thread__fprintf(struct thread *thread, FILE *fp);
 
+struct thread *thread__main_thread(struct machine *machine, struct thread *thread);
+
 void thread__find_addr_map(struct thread *thread,
                           u8 cpumode, enum map_type type, u64 addr,
                           struct addr_location *al);
index 5654fe15e036795896d3548c568bd8fa3657da06..40585f5b7027d1d1eb7c0d78e754c1a7963a74f9 100644 (file)
@@ -202,7 +202,7 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
        if (!slist)
                return NULL;
 
-       strlist__for_each(pos, slist) {
+       strlist__for_each_entry(pos, slist) {
                pid = strtol(pos->s, &end_ptr, 10);
 
                if (pid == INT_MIN || pid == INT_MAX ||
@@ -278,7 +278,7 @@ struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
        if (!slist)
                return NULL;
 
-       strlist__for_each(pos, slist) {
+       strlist__for_each_entry(pos, slist) {
                tid = strtol(pos->s, &end_ptr, 10);
 
                if (tid == INT_MIN || tid == INT_MAX ||
index 8ae051e0ec79090e674fdf9a6cc9fbece8275ebb..c330780674fcffc270563cebf338e175478b849c 100644 (file)
@@ -105,3 +105,11 @@ trace_event__tp_format(const char *sys, const char *name)
 
        return tp_format(sys, name);
 }
+
+struct event_format *trace_event__tp_format_id(int id)
+{
+       if (!tevent_initialized && trace_event__init2())
+               return ERR_PTR(-ENOMEM);
+
+       return pevent_find_event(tevent.pevent, id);
+}
index bce5b1dac26827735e538d737e9417555f3c3e2b..b0af9c81bb0df292ff798b9012971ee347b2dc1f 100644 (file)
@@ -23,6 +23,8 @@ int trace_event__register_resolver(struct machine *machine,
 struct event_format*
 trace_event__tp_format(const char *sys, const char *name);
 
+struct event_format *trace_event__tp_format_id(int id);
+
 int bigendian(void);
 
 void event_format__fprintf(struct event_format *event,
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
new file mode 100644 (file)
index 0000000..97c0f8f
--- /dev/null
@@ -0,0 +1,699 @@
+/*
+ * Post mortem Dwarf CFI based unwinding on top of regs and stack dumps.
+ *
+ * Lots of this code has been borrowed or heavily inspired by parts of
+ * the libunwind 0.99 code which are (amongst other contributors I may have
+ * forgotten):
+ *
+ * Copyright (C) 2002-2007 Hewlett-Packard Co
+ *     Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * And the bugs have been added by:
+ *
+ * Copyright (C) 2010, Frederic Weisbecker <fweisbec@gmail.com>
+ * Copyright (C) 2012, Jiri Olsa <jolsa@redhat.com>
+ *
+ */
+
+#include <elf.h>
+#include <gelf.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <linux/list.h>
+#ifndef REMOTE_UNWIND_LIBUNWIND
+#include <libunwind.h>
+#include <libunwind-ptrace.h>
+#endif
+#include "callchain.h"
+#include "thread.h"
+#include "session.h"
+#include "perf_regs.h"
+#include "unwind.h"
+#include "symbol.h"
+#include "util.h"
+#include "debug.h"
+#include "asm/bug.h"
+
+extern int
+UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+                                   unw_word_t ip,
+                                   unw_dyn_info_t *di,
+                                   unw_proc_info_t *pi,
+                                   int need_unwind_info, void *arg);
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+extern int
+UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
+                                unw_word_t ip,
+                                unw_word_t segbase,
+                                const char *obj_name, unw_word_t start,
+                                unw_word_t end);
+
+#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
+
+#define DW_EH_PE_FORMAT_MASK   0x0f    /* format of the encoded value */
+#define DW_EH_PE_APPL_MASK     0x70    /* how the value is to be applied */
+
+/* Pointer-encoding formats: */
+#define DW_EH_PE_omit          0xff
+#define DW_EH_PE_ptr           0x00    /* pointer-sized unsigned value */
+#define DW_EH_PE_udata4                0x03    /* unsigned 32-bit value */
+#define DW_EH_PE_udata8                0x04    /* unsigned 64-bit value */
+#define DW_EH_PE_sdata4                0x0b    /* signed 32-bit value */
+#define DW_EH_PE_sdata8                0x0c    /* signed 64-bit value */
+
+/* Pointer-encoding application: */
+#define DW_EH_PE_absptr                0x00    /* absolute value */
+#define DW_EH_PE_pcrel         0x10    /* rel. to addr. of encoded value */
+
+/*
+ * The following are not documented by LSB v1.3, yet they are used by
+ * GCC, presumably they aren't documented by LSB since they aren't
+ * used on Linux:
+ */
+#define DW_EH_PE_funcrel       0x40    /* start-of-procedure-relative */
+#define DW_EH_PE_aligned       0x50    /* aligned pointer */
+
+/* Flags intentionally not handled, since they're not needed:
+ * #define DW_EH_PE_indirect      0x80
+ * #define DW_EH_PE_uleb128       0x01
+ * #define DW_EH_PE_udata2        0x02
+ * #define DW_EH_PE_sleb128       0x09
+ * #define DW_EH_PE_sdata2        0x0a
+ * #define DW_EH_PE_textrel       0x20
+ * #define DW_EH_PE_datarel       0x30
+ */
+
+struct unwind_info {
+       struct perf_sample      *sample;
+       struct machine          *machine;
+       struct thread           *thread;
+};
+
+#define dw_read(ptr, type, end) ({     \
+       type *__p = (type *) ptr;       \
+       type  __v;                      \
+       if ((__p + 1) > (type *) end)   \
+               return -EINVAL;         \
+       __v = *__p++;                   \
+       ptr = (typeof(ptr)) __p;        \
+       __v;                            \
+       })
+
+static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val,
+                                  u8 encoding)
+{
+       u8 *cur = *p;
+       *val = 0;
+
+       switch (encoding) {
+       case DW_EH_PE_omit:
+               *val = 0;
+               goto out;
+       case DW_EH_PE_ptr:
+               *val = dw_read(cur, unsigned long, end);
+               goto out;
+       default:
+               break;
+       }
+
+       switch (encoding & DW_EH_PE_APPL_MASK) {
+       case DW_EH_PE_absptr:
+               break;
+       case DW_EH_PE_pcrel:
+               *val = (unsigned long) cur;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if ((encoding & 0x07) == 0x00)
+               encoding |= DW_EH_PE_udata4;
+
+       switch (encoding & DW_EH_PE_FORMAT_MASK) {
+       case DW_EH_PE_sdata4:
+               *val += dw_read(cur, s32, end);
+               break;
+       case DW_EH_PE_udata4:
+               *val += dw_read(cur, u32, end);
+               break;
+       case DW_EH_PE_sdata8:
+               *val += dw_read(cur, s64, end);
+               break;
+       case DW_EH_PE_udata8:
+               *val += dw_read(cur, u64, end);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+ out:
+       *p = cur;
+       return 0;
+}
+
+#define dw_read_encoded_value(ptr, end, enc) ({                        \
+       u64 __v;                                                \
+       if (__dw_read_encoded_value(&ptr, end, &__v, enc)) {    \
+               return -EINVAL;                                 \
+       }                                                       \
+       __v;                                                    \
+       })
+
+static u64 elf_section_offset(int fd, const char *name)
+{
+       Elf *elf;
+       GElf_Ehdr ehdr;
+       GElf_Shdr shdr;
+       u64 offset = 0;
+
+       elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+       if (elf == NULL)
+               return 0;
+
+       do {
+               if (gelf_getehdr(elf, &ehdr) == NULL)
+                       break;
+
+               if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
+                       break;
+
+               offset = shdr.sh_offset;
+       } while (0);
+
+       elf_end(elf);
+       return offset;
+}
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int elf_is_exec(int fd, const char *name)
+{
+       Elf *elf;
+       GElf_Ehdr ehdr;
+       int retval = 0;
+
+       elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+       if (elf == NULL)
+               return 0;
+       if (gelf_getehdr(elf, &ehdr) == NULL)
+               goto out;
+
+       retval = (ehdr.e_type == ET_EXEC);
+
+out:
+       elf_end(elf);
+       pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval);
+       return retval;
+}
+#endif
+
+struct table_entry {
+       u32 start_ip_offset;
+       u32 fde_offset;
+};
+
+struct eh_frame_hdr {
+       unsigned char version;
+       unsigned char eh_frame_ptr_enc;
+       unsigned char fde_count_enc;
+       unsigned char table_enc;
+
+       /*
+        * The rest of the header is variable-length and consists of the
+        * following members:
+        *
+        *      encoded_t eh_frame_ptr;
+        *      encoded_t fde_count;
+        */
+
+       /* A single encoded pointer should not be more than 8 bytes. */
+       u64 enc[2];
+
+       /*
+        * struct {
+        *    encoded_t start_ip;
+        *    encoded_t fde_addr;
+        * } binary_search_table[fde_count];
+        */
+       char data[0];
+} __packed;
+
+static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
+                              u64 offset, u64 *table_data, u64 *segbase,
+                              u64 *fde_count)
+{
+       struct eh_frame_hdr hdr;
+       u8 *enc = (u8 *) &hdr.enc;
+       u8 *end = (u8 *) &hdr.data;
+       ssize_t r;
+
+       r = dso__data_read_offset(dso, machine, offset,
+                                 (u8 *) &hdr, sizeof(hdr));
+       if (r != sizeof(hdr))
+               return -EINVAL;
+
+       /* We don't need eh_frame_ptr, just skip it. */
+       dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc);
+
+       *fde_count  = dw_read_encoded_value(enc, end, hdr.fde_count_enc);
+       *segbase    = offset;
+       *table_data = (enc - (u8 *) &hdr) + offset;
+       return 0;
+}
+
+static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
+                                    u64 *table_data, u64 *segbase,
+                                    u64 *fde_count)
+{
+       int ret = -EINVAL, fd;
+       u64 offset = dso->data.eh_frame_hdr_offset;
+
+       if (offset == 0) {
+               fd = dso__data_get_fd(dso, machine);
+               if (fd < 0)
+                       return -EINVAL;
+
+               /* Check the .eh_frame section for unwinding info */
+               offset = elf_section_offset(fd, ".eh_frame_hdr");
+               dso->data.eh_frame_hdr_offset = offset;
+               dso__data_put_fd(dso);
+       }
+
+       if (offset)
+               ret = unwind_spec_ehframe(dso, machine, offset,
+                                         table_data, segbase,
+                                         fde_count);
+
+       return ret;
+}
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int read_unwind_spec_debug_frame(struct dso *dso,
+                                       struct machine *machine, u64 *offset)
+{
+       int fd;
+       u64 ofs = dso->data.debug_frame_offset;
+
+       if (ofs == 0) {
+               fd = dso__data_get_fd(dso, machine);
+               if (fd < 0)
+                       return -EINVAL;
+
+               /* Check the .debug_frame section for unwinding info */
+               ofs = elf_section_offset(fd, ".debug_frame");
+               dso->data.debug_frame_offset = ofs;
+               dso__data_put_fd(dso);
+       }
+
+       *offset = ofs;
+       if (*offset)
+               return 0;
+
+       return -EINVAL;
+}
+#endif
+
+static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
+{
+       struct addr_location al;
+
+       thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
+                             MAP__FUNCTION, ip, &al);
+       if (!al.map) {
+               /*
+                * We've seen cases (softice) where DWARF unwinder went
+                * through non executable mmaps, which we need to lookup
+                * in MAP__VARIABLE tree.
+                */
+               thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
+                                     MAP__VARIABLE, ip, &al);
+       }
+       return al.map;
+}
+
+static int
+find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
+              int need_unwind_info, void *arg)
+{
+       struct unwind_info *ui = arg;
+       struct map *map;
+       unw_dyn_info_t di;
+       u64 table_data, segbase, fde_count;
+       int ret = -EINVAL;
+
+       map = find_map(ip, ui);
+       if (!map || !map->dso)
+               return -EINVAL;
+
+       pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
+
+       /* Check the .eh_frame section for unwinding info */
+       if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
+                                      &table_data, &segbase, &fde_count)) {
+               memset(&di, 0, sizeof(di));
+               di.format   = UNW_INFO_FORMAT_REMOTE_TABLE;
+               di.start_ip = map->start;
+               di.end_ip   = map->end;
+               di.u.rti.segbase    = map->start + segbase;
+               di.u.rti.table_data = map->start + table_data;
+               di.u.rti.table_len  = fde_count * sizeof(struct table_entry)
+                                     / sizeof(unw_word_t);
+               ret = dwarf_search_unwind_table(as, ip, &di, pi,
+                                               need_unwind_info, arg);
+       }
+
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+       /* Check the .debug_frame section for unwinding info */
+       if (ret < 0 &&
+           !read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+               int fd = dso__data_get_fd(map->dso, ui->machine);
+               int is_exec = elf_is_exec(fd, map->dso->name);
+               unw_word_t base = is_exec ? 0 : map->start;
+               const char *symfile;
+
+               if (fd >= 0)
+                       dso__data_put_fd(map->dso);
+
+               symfile = map->dso->symsrc_filename ?: map->dso->name;
+
+               memset(&di, 0, sizeof(di));
+               if (dwarf_find_debug_frame(0, &di, ip, base, symfile,
+                                          map->start, map->end))
+                       return dwarf_search_unwind_table(as, ip, &di, pi,
+                                                        need_unwind_info, arg);
+       }
+#endif
+
+       return ret;
+}
+
+static int access_fpreg(unw_addr_space_t __maybe_unused as,
+                       unw_regnum_t __maybe_unused num,
+                       unw_fpreg_t __maybe_unused *val,
+                       int __maybe_unused __write,
+                       void __maybe_unused *arg)
+{
+       pr_err("unwind: access_fpreg unsupported\n");
+       return -UNW_EINVAL;
+}
+
+static int get_dyn_info_list_addr(unw_addr_space_t __maybe_unused as,
+                                 unw_word_t __maybe_unused *dil_addr,
+                                 void __maybe_unused *arg)
+{
+       return -UNW_ENOINFO;
+}
+
+static int resume(unw_addr_space_t __maybe_unused as,
+                 unw_cursor_t __maybe_unused *cu,
+                 void __maybe_unused *arg)
+{
+       pr_err("unwind: resume unsupported\n");
+       return -UNW_EINVAL;
+}
+
+static int
+get_proc_name(unw_addr_space_t __maybe_unused as,
+             unw_word_t __maybe_unused addr,
+               char __maybe_unused *bufp, size_t __maybe_unused buf_len,
+               unw_word_t __maybe_unused *offp, void __maybe_unused *arg)
+{
+       pr_err("unwind: get_proc_name unsupported\n");
+       return -UNW_EINVAL;
+}
+
+static int access_dso_mem(struct unwind_info *ui, unw_word_t addr,
+                         unw_word_t *data)
+{
+       struct map *map;
+       ssize_t size;
+
+       map = find_map(addr, ui);
+       if (!map) {
+               pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
+               return -1;
+       }
+
+       if (!map->dso)
+               return -1;
+
+       size = dso__data_read_addr(map->dso, map, ui->machine,
+                                  addr, (u8 *) data, sizeof(*data));
+
+       return !(size == sizeof(*data));
+}
+
+static int access_mem(unw_addr_space_t __maybe_unused as,
+                     unw_word_t addr, unw_word_t *valp,
+                     int __write, void *arg)
+{
+       struct unwind_info *ui = arg;
+       struct stack_dump *stack = &ui->sample->user_stack;
+       u64 start, end;
+       int offset;
+       int ret;
+
+       /* Don't support write, probably not needed. */
+       if (__write || !stack || !ui->sample->user_regs.regs) {
+               *valp = 0;
+               return 0;
+       }
+
+       ret = perf_reg_value(&start, &ui->sample->user_regs,
+                            LIBUNWIND__ARCH_REG_SP);
+       if (ret)
+               return ret;
+
+       end = start + stack->size;
+
+       /* Check overflow. */
+       if (addr + sizeof(unw_word_t) < addr)
+               return -EINVAL;
+
+       if (addr < start || addr + sizeof(unw_word_t) >= end) {
+               ret = access_dso_mem(ui, addr, valp);
+               if (ret) {
+                       pr_debug("unwind: access_mem %p not inside range"
+                                " 0x%" PRIx64 "-0x%" PRIx64 "\n",
+                                (void *) (uintptr_t) addr, start, end);
+                       *valp = 0;
+                       return ret;
+               }
+               return 0;
+       }
+
+       offset = addr - start;
+       *valp  = *(unw_word_t *)&stack->data[offset];
+       pr_debug("unwind: access_mem addr %p val %lx, offset %d\n",
+                (void *) (uintptr_t) addr, (unsigned long)*valp, offset);
+       return 0;
+}
+
+static int access_reg(unw_addr_space_t __maybe_unused as,
+                     unw_regnum_t regnum, unw_word_t *valp,
+                     int __write, void *arg)
+{
+       struct unwind_info *ui = arg;
+       int id, ret;
+       u64 val;
+
+       /* Don't support write, I suspect we don't need it. */
+       if (__write) {
+               pr_err("unwind: access_reg w %d\n", regnum);
+               return 0;
+       }
+
+       if (!ui->sample->user_regs.regs) {
+               *valp = 0;
+               return 0;
+       }
+
+       id = LIBUNWIND__ARCH_REG_ID(regnum);
+       if (id < 0)
+               return -EINVAL;
+
+       ret = perf_reg_value(&val, &ui->sample->user_regs, id);
+       if (ret) {
+               pr_err("unwind: can't read reg %d\n", regnum);
+               return ret;
+       }
+
+       *valp = (unw_word_t) val;
+       pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp);
+       return 0;
+}
+
+static void put_unwind_info(unw_addr_space_t __maybe_unused as,
+                           unw_proc_info_t *pi __maybe_unused,
+                           void *arg __maybe_unused)
+{
+       pr_debug("unwind: put_unwind_info called\n");
+}
+
+static int entry(u64 ip, struct thread *thread,
+                unwind_entry_cb_t cb, void *arg)
+{
+       struct unwind_entry e;
+       struct addr_location al;
+
+       thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
+                                  MAP__FUNCTION, ip, &al);
+
+       e.ip = ip;
+       e.map = al.map;
+       e.sym = al.sym;
+
+       pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
+                al.sym ? al.sym->name : "''",
+                ip,
+                al.map ? al.map->map_ip(al.map, ip) : (u64) 0);
+
+       return cb(&e, arg);
+}
+
+static void display_error(int err)
+{
+       switch (err) {
+       case UNW_EINVAL:
+               pr_err("unwind: Only supports local.\n");
+               break;
+       case UNW_EUNSPEC:
+               pr_err("unwind: Unspecified error.\n");
+               break;
+       case UNW_EBADREG:
+               pr_err("unwind: Register unavailable.\n");
+               break;
+       default:
+               break;
+       }
+}
+
+static unw_accessors_t accessors = {
+       .find_proc_info         = find_proc_info,
+       .put_unwind_info        = put_unwind_info,
+       .get_dyn_info_list_addr = get_dyn_info_list_addr,
+       .access_mem             = access_mem,
+       .access_reg             = access_reg,
+       .access_fpreg           = access_fpreg,
+       .resume                 = resume,
+       .get_proc_name          = get_proc_name,
+};
+
+static int _unwind__prepare_access(struct thread *thread)
+{
+       if (callchain_param.record_mode != CALLCHAIN_DWARF)
+               return 0;
+
+       thread->addr_space = unw_create_addr_space(&accessors, 0);
+       if (!thread->addr_space) {
+               pr_err("unwind: Can't create unwind address space.\n");
+               return -ENOMEM;
+       }
+
+       unw_set_caching_policy(thread->addr_space, UNW_CACHE_GLOBAL);
+       return 0;
+}
+
+static void _unwind__flush_access(struct thread *thread)
+{
+       if (callchain_param.record_mode != CALLCHAIN_DWARF)
+               return;
+
+       unw_flush_cache(thread->addr_space, 0, 0);
+}
+
+static void _unwind__finish_access(struct thread *thread)
+{
+       if (callchain_param.record_mode != CALLCHAIN_DWARF)
+               return;
+
+       unw_destroy_addr_space(thread->addr_space);
+}
+
+static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
+                      void *arg, int max_stack)
+{
+       u64 val;
+       unw_word_t ips[max_stack];
+       unw_addr_space_t addr_space;
+       unw_cursor_t c;
+       int ret, i = 0;
+
+       ret = perf_reg_value(&val, &ui->sample->user_regs,
+                            LIBUNWIND__ARCH_REG_IP);
+       if (ret)
+               return ret;
+
+       ips[i++] = (unw_word_t) val;
+
+       /*
+        * If we need more than one entry, do the DWARF
+        * unwind itself.
+        */
+       if (max_stack - 1 > 0) {
+               WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
+               addr_space = ui->thread->addr_space;
+
+               if (addr_space == NULL)
+                       return -1;
+
+               ret = unw_init_remote(&c, addr_space, ui);
+               if (ret)
+                       display_error(ret);
+
+               while (!ret && (unw_step(&c) > 0) && i < max_stack) {
+                       unw_get_reg(&c, UNW_REG_IP, &ips[i]);
+                       ++i;
+               }
+
+               max_stack = i;
+       }
+
+       /*
+        * Display what we got based on the order setup.
+        */
+       for (i = 0; i < max_stack && !ret; i++) {
+               int j = i;
+
+               if (callchain_param.order == ORDER_CALLER)
+                       j = max_stack - i - 1;
+               ret = ips[j] ? entry(ips[j], ui->thread, cb, arg) : 0;
+       }
+
+       return ret;
+}
+
+static int _unwind__get_entries(unwind_entry_cb_t cb, void *arg,
+                       struct thread *thread,
+                       struct perf_sample *data, int max_stack)
+{
+       struct unwind_info ui = {
+               .sample       = data,
+               .thread       = thread,
+               .machine      = thread->mg->machine,
+       };
+
+       if (!data->user_regs.regs)
+               return -EINVAL;
+
+       if (max_stack <= 0)
+               return -EINVAL;
+
+       return get_entries(&ui, cb, arg, max_stack);
+}
+
+static struct unwind_libunwind_ops
+_unwind_libunwind_ops = {
+       .prepare_access = _unwind__prepare_access,
+       .flush_access   = _unwind__flush_access,
+       .finish_access  = _unwind__finish_access,
+       .get_entries    = _unwind__get_entries,
+};
+
+#ifndef REMOTE_UNWIND_LIBUNWIND
+struct unwind_libunwind_ops *
+local_unwind_libunwind_ops = &_unwind_libunwind_ops;
+#endif
index 63687d3a344e7f39b94f465547e929b2c2779b40..6d542a4e0648eeabb0cb58eda0cefbfa03d1028a 100644 (file)
-/*
- * Post mortem Dwarf CFI based unwinding on top of regs and stack dumps.
- *
- * Lots of this code have been borrowed or heavily inspired from parts of
- * the libunwind 0.99 code which are (amongst other contributors I may have
- * forgotten):
- *
- * Copyright (C) 2002-2007 Hewlett-Packard Co
- *     Contributed by David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * And the bugs have been added by:
- *
- * Copyright (C) 2010, Frederic Weisbecker <fweisbec@gmail.com>
- * Copyright (C) 2012, Jiri Olsa <jolsa@redhat.com>
- *
- */
-
-#include <elf.h>
-#include <gelf.h>
-#include <fcntl.h>
-#include <string.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <linux/list.h>
-#include <libunwind.h>
-#include <libunwind-ptrace.h>
-#include "callchain.h"
+#include "unwind.h"
 #include "thread.h"
 #include "session.h"
-#include "perf_regs.h"
-#include "unwind.h"
-#include "symbol.h"
-#include "util.h"
 #include "debug.h"
-#include "asm/bug.h"
-
-extern int
-UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
-                                   unw_word_t ip,
-                                   unw_dyn_info_t *di,
-                                   unw_proc_info_t *pi,
-                                   int need_unwind_info, void *arg);
-
-#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
-
-extern int
-UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
-                                unw_word_t ip,
-                                unw_word_t segbase,
-                                const char *obj_name, unw_word_t start,
-                                unw_word_t end);
-
-#define dwarf_find_debug_frame UNW_OBJ(dwarf_find_debug_frame)
-
-#define DW_EH_PE_FORMAT_MASK   0x0f    /* format of the encoded value */
-#define DW_EH_PE_APPL_MASK     0x70    /* how the value is to be applied */
-
-/* Pointer-encoding formats: */
-#define DW_EH_PE_omit          0xff
-#define DW_EH_PE_ptr           0x00    /* pointer-sized unsigned value */
-#define DW_EH_PE_udata4                0x03    /* unsigned 32-bit value */
-#define DW_EH_PE_udata8                0x04    /* unsigned 64-bit value */
-#define DW_EH_PE_sdata4                0x0b    /* signed 32-bit value */
-#define DW_EH_PE_sdata8                0x0c    /* signed 64-bit value */
-
-/* Pointer-encoding application: */
-#define DW_EH_PE_absptr                0x00    /* absolute value */
-#define DW_EH_PE_pcrel         0x10    /* rel. to addr. of encoded value */
-
-/*
- * The following are not documented by LSB v1.3, yet they are used by
- * GCC, presumably they aren't documented by LSB since they aren't
- * used on Linux:
- */
-#define DW_EH_PE_funcrel       0x40    /* start-of-procedure-relative */
-#define DW_EH_PE_aligned       0x50    /* aligned pointer */
+#include "arch/common.h"
 
-/* Flags intentionaly not handled, since they're not needed:
- * #define DW_EH_PE_indirect      0x80
- * #define DW_EH_PE_uleb128       0x01
- * #define DW_EH_PE_udata2        0x02
- * #define DW_EH_PE_sleb128       0x09
- * #define DW_EH_PE_sdata2        0x0a
- * #define DW_EH_PE_textrel       0x20
- * #define DW_EH_PE_datarel       0x30
- */
+struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
+struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
+struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
 
-struct unwind_info {
-       struct perf_sample      *sample;
-       struct machine          *machine;
-       struct thread           *thread;
-};
-
-#define dw_read(ptr, type, end) ({     \
-       type *__p = (type *) ptr;       \
-       type  __v;                      \
-       if ((__p + 1) > (type *) end)   \
-               return -EINVAL;         \
-       __v = *__p++;                   \
-       ptr = (typeof(ptr)) __p;        \
-       __v;                            \
-       })
-
-static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val,
-                                  u8 encoding)
+static void unwind__register_ops(struct thread *thread,
+                         struct unwind_libunwind_ops *ops)
 {
-       u8 *cur = *p;
-       *val = 0;
-
-       switch (encoding) {
-       case DW_EH_PE_omit:
-               *val = 0;
-               goto out;
-       case DW_EH_PE_ptr:
-               *val = dw_read(cur, unsigned long, end);
-               goto out;
-       default:
-               break;
-       }
-
-       switch (encoding & DW_EH_PE_APPL_MASK) {
-       case DW_EH_PE_absptr:
-               break;
-       case DW_EH_PE_pcrel:
-               *val = (unsigned long) cur;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if ((encoding & 0x07) == 0x00)
-               encoding |= DW_EH_PE_udata4;
-
-       switch (encoding & DW_EH_PE_FORMAT_MASK) {
-       case DW_EH_PE_sdata4:
-               *val += dw_read(cur, s32, end);
-               break;
-       case DW_EH_PE_udata4:
-               *val += dw_read(cur, u32, end);
-               break;
-       case DW_EH_PE_sdata8:
-               *val += dw_read(cur, s64, end);
-               break;
-       case DW_EH_PE_udata8:
-               *val += dw_read(cur, u64, end);
-               break;
-       default:
-               return -EINVAL;
-       }
-
- out:
-       *p = cur;
-       return 0;
-}
-
-#define dw_read_encoded_value(ptr, end, enc) ({                        \
-       u64 __v;                                                \
-       if (__dw_read_encoded_value(&ptr, end, &__v, enc)) {    \
-               return -EINVAL;                                 \
-       }                                                       \
-       __v;                                                    \
-       })
-
-static u64 elf_section_offset(int fd, const char *name)
-{
-       Elf *elf;
-       GElf_Ehdr ehdr;
-       GElf_Shdr shdr;
-       u64 offset = 0;
-
-       elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
-       if (elf == NULL)
-               return 0;
-
-       do {
-               if (gelf_getehdr(elf, &ehdr) == NULL)
-                       break;
-
-               if (!elf_section_by_name(elf, &ehdr, &shdr, name, NULL))
-                       break;
-
-               offset = shdr.sh_offset;
-       } while (0);
-
-       elf_end(elf);
-       return offset;
+       thread->unwind_libunwind_ops = ops;
 }
 
-#ifndef NO_LIBUNWIND_DEBUG_FRAME
-static int elf_is_exec(int fd, const char *name)
+int unwind__prepare_access(struct thread *thread, struct map *map,
+                          bool *initialized)
 {
-       Elf *elf;
-       GElf_Ehdr ehdr;
-       int retval = 0;
+       const char *arch;
+       enum dso_type dso_type;
+       struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
+       int err;
 
-       elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
-       if (elf == NULL)
+       if (thread->addr_space) {
+               pr_debug("unwind: thread map already set, dso=%s\n",
+                        map->dso->name);
+               if (initialized)
+                       *initialized = true;
                return 0;
-       if (gelf_getehdr(elf, &ehdr) == NULL)
-               goto out;
-
-       retval = (ehdr.e_type == ET_EXEC);
-
-out:
-       elf_end(elf);
-       pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval);
-       return retval;
-}
-#endif
-
-struct table_entry {
-       u32 start_ip_offset;
-       u32 fde_offset;
-};
-
-struct eh_frame_hdr {
-       unsigned char version;
-       unsigned char eh_frame_ptr_enc;
-       unsigned char fde_count_enc;
-       unsigned char table_enc;
-
-       /*
-        * The rest of the header is variable-length and consists of the
-        * following members:
-        *
-        *      encoded_t eh_frame_ptr;
-        *      encoded_t fde_count;
-        */
-
-       /* A single encoded pointer should not be more than 8 bytes. */
-       u64 enc[2];
-
-       /*
-        * struct {
-        *    encoded_t start_ip;
-        *    encoded_t fde_addr;
-        * } binary_search_table[fde_count];
-        */
-       char data[0];
-} __packed;
-
-static int unwind_spec_ehframe(struct dso *dso, struct machine *machine,
-                              u64 offset, u64 *table_data, u64 *segbase,
-                              u64 *fde_count)
-{
-       struct eh_frame_hdr hdr;
-       u8 *enc = (u8 *) &hdr.enc;
-       u8 *end = (u8 *) &hdr.data;
-       ssize_t r;
-
-       r = dso__data_read_offset(dso, machine, offset,
-                                 (u8 *) &hdr, sizeof(hdr));
-       if (r != sizeof(hdr))
-               return -EINVAL;
-
-       /* We dont need eh_frame_ptr, just skip it. */
-       dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc);
-
-       *fde_count  = dw_read_encoded_value(enc, end, hdr.fde_count_enc);
-       *segbase    = offset;
-       *table_data = (enc - (u8 *) &hdr) + offset;
-       return 0;
-}
-
-static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
-                                    u64 *table_data, u64 *segbase,
-                                    u64 *fde_count)
-{
-       int ret = -EINVAL, fd;
-       u64 offset = dso->data.eh_frame_hdr_offset;
-
-       if (offset == 0) {
-               fd = dso__data_get_fd(dso, machine);
-               if (fd < 0)
-                       return -EINVAL;
-
-               /* Check the .eh_frame section for unwinding info */
-               offset = elf_section_offset(fd, ".eh_frame_hdr");
-               dso->data.eh_frame_hdr_offset = offset;
-               dso__data_put_fd(dso);
        }
 
-       if (offset)
-               ret = unwind_spec_ehframe(dso, machine, offset,
-                                         table_data, segbase,
-                                         fde_count);
+       /* env->arch is NULL for live-mode (i.e. perf top) */
+       if (!thread->mg->machine->env || !thread->mg->machine->env->arch)
+               goto out_register;
 
-       return ret;
-}
-
-#ifndef NO_LIBUNWIND_DEBUG_FRAME
-static int read_unwind_spec_debug_frame(struct dso *dso,
-                                       struct machine *machine, u64 *offset)
-{
-       int fd;
-       u64 ofs = dso->data.debug_frame_offset;
-
-       if (ofs == 0) {
-               fd = dso__data_get_fd(dso, machine);
-               if (fd < 0)
-                       return -EINVAL;
-
-               /* Check the .debug_frame section for unwinding info */
-               ofs = elf_section_offset(fd, ".debug_frame");
-               dso->data.debug_frame_offset = ofs;
-               dso__data_put_fd(dso);
-       }
-
-       *offset = ofs;
-       if (*offset)
+       dso_type = dso__type(map->dso, thread->mg->machine);
+       if (dso_type == DSO__TYPE_UNKNOWN)
                return 0;
 
-       return -EINVAL;
-}
-#endif
-
-static struct map *find_map(unw_word_t ip, struct unwind_info *ui)
-{
-       struct addr_location al;
-
-       thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
-                             MAP__FUNCTION, ip, &al);
-       if (!al.map) {
-               /*
-                * We've seen cases (softice) where DWARF unwinder went
-                * through non executable mmaps, which we need to lookup
-                * in MAP__VARIABLE tree.
-                */
-               thread__find_addr_map(ui->thread, PERF_RECORD_MISC_USER,
-                                     MAP__VARIABLE, ip, &al);
-       }
-       return al.map;
-}
-
-static int
-find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
-              int need_unwind_info, void *arg)
-{
-       struct unwind_info *ui = arg;
-       struct map *map;
-       unw_dyn_info_t di;
-       u64 table_data, segbase, fde_count;
-       int ret = -EINVAL;
-
-       map = find_map(ip, ui);
-       if (!map || !map->dso)
-               return -EINVAL;
-
-       pr_debug("unwind: find_proc_info dso %s\n", map->dso->name);
-
-       /* Check the .eh_frame section for unwinding info */
-       if (!read_unwind_spec_eh_frame(map->dso, ui->machine,
-                                      &table_data, &segbase, &fde_count)) {
-               memset(&di, 0, sizeof(di));
-               di.format   = UNW_INFO_FORMAT_REMOTE_TABLE;
-               di.start_ip = map->start;
-               di.end_ip   = map->end;
-               di.u.rti.segbase    = map->start + segbase;
-               di.u.rti.table_data = map->start + table_data;
-               di.u.rti.table_len  = fde_count * sizeof(struct table_entry)
-                                     / sizeof(unw_word_t);
-               ret = dwarf_search_unwind_table(as, ip, &di, pi,
-                                               need_unwind_info, arg);
-       }
-
-#ifndef NO_LIBUNWIND_DEBUG_FRAME
-       /* Check the .debug_frame section for unwinding info */
-       if (ret < 0 &&
-           !read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
-               int fd = dso__data_get_fd(map->dso, ui->machine);
-               int is_exec = elf_is_exec(fd, map->dso->name);
-               unw_word_t base = is_exec ? 0 : map->start;
-               const char *symfile;
-
-               if (fd >= 0)
-                       dso__data_put_fd(map->dso);
-
-               symfile = map->dso->symsrc_filename ?: map->dso->name;
-
-               memset(&di, 0, sizeof(di));
-               if (dwarf_find_debug_frame(0, &di, ip, base, symfile,
-                                          map->start, map->end))
-                       return dwarf_search_unwind_table(as, ip, &di, pi,
-                                                        need_unwind_info, arg);
-       }
-#endif
-
-       return ret;
-}
-
-static int access_fpreg(unw_addr_space_t __maybe_unused as,
-                       unw_regnum_t __maybe_unused num,
-                       unw_fpreg_t __maybe_unused *val,
-                       int __maybe_unused __write,
-                       void __maybe_unused *arg)
-{
-       pr_err("unwind: access_fpreg unsupported\n");
-       return -UNW_EINVAL;
-}
-
-static int get_dyn_info_list_addr(unw_addr_space_t __maybe_unused as,
-                                 unw_word_t __maybe_unused *dil_addr,
-                                 void __maybe_unused *arg)
-{
-       return -UNW_ENOINFO;
-}
-
-static int resume(unw_addr_space_t __maybe_unused as,
-                 unw_cursor_t __maybe_unused *cu,
-                 void __maybe_unused *arg)
-{
-       pr_err("unwind: resume unsupported\n");
-       return -UNW_EINVAL;
-}
+       arch = normalize_arch(thread->mg->machine->env->arch);
 
-static int
-get_proc_name(unw_addr_space_t __maybe_unused as,
-             unw_word_t __maybe_unused addr,
-               char __maybe_unused *bufp, size_t __maybe_unused buf_len,
-               unw_word_t __maybe_unused *offp, void __maybe_unused *arg)
-{
-       pr_err("unwind: get_proc_name unsupported\n");
-       return -UNW_EINVAL;
-}
-
-static int access_dso_mem(struct unwind_info *ui, unw_word_t addr,
-                         unw_word_t *data)
-{
-       struct map *map;
-       ssize_t size;
-
-       map = find_map(addr, ui);
-       if (!map) {
-               pr_debug("unwind: no map for %lx\n", (unsigned long)addr);
-               return -1;
+       if (!strcmp(arch, "x86")) {
+               if (dso_type != DSO__TYPE_64BIT)
+                       ops = x86_32_unwind_libunwind_ops;
+       } else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
+               if (dso_type == DSO__TYPE_64BIT)
+                       ops = arm64_unwind_libunwind_ops;
        }
 
-       if (!map->dso)
+       if (!ops) {
+               pr_err("unwind: target platform=%s is not supported\n", arch);
                return -1;
-
-       size = dso__data_read_addr(map->dso, map, ui->machine,
-                                  addr, (u8 *) data, sizeof(*data));
-
-       return !(size == sizeof(*data));
-}
-
-static int access_mem(unw_addr_space_t __maybe_unused as,
-                     unw_word_t addr, unw_word_t *valp,
-                     int __write, void *arg)
-{
-       struct unwind_info *ui = arg;
-       struct stack_dump *stack = &ui->sample->user_stack;
-       u64 start, end;
-       int offset;
-       int ret;
-
-       /* Don't support write, probably not needed. */
-       if (__write || !stack || !ui->sample->user_regs.regs) {
-               *valp = 0;
-               return 0;
-       }
-
-       ret = perf_reg_value(&start, &ui->sample->user_regs, PERF_REG_SP);
-       if (ret)
-               return ret;
-
-       end = start + stack->size;
-
-       /* Check overflow. */
-       if (addr + sizeof(unw_word_t) < addr)
-               return -EINVAL;
-
-       if (addr < start || addr + sizeof(unw_word_t) >= end) {
-               ret = access_dso_mem(ui, addr, valp);
-               if (ret) {
-                       pr_debug("unwind: access_mem %p not inside range"
-                                " 0x%" PRIx64 "-0x%" PRIx64 "\n",
-                                (void *) (uintptr_t) addr, start, end);
-                       *valp = 0;
-                       return ret;
-               }
-               return 0;
-       }
-
-       offset = addr - start;
-       *valp  = *(unw_word_t *)&stack->data[offset];
-       pr_debug("unwind: access_mem addr %p val %lx, offset %d\n",
-                (void *) (uintptr_t) addr, (unsigned long)*valp, offset);
-       return 0;
-}
-
-static int access_reg(unw_addr_space_t __maybe_unused as,
-                     unw_regnum_t regnum, unw_word_t *valp,
-                     int __write, void *arg)
-{
-       struct unwind_info *ui = arg;
-       int id, ret;
-       u64 val;
-
-       /* Don't support write, I suspect we don't need it. */
-       if (__write) {
-               pr_err("unwind: access_reg w %d\n", regnum);
-               return 0;
-       }
-
-       if (!ui->sample->user_regs.regs) {
-               *valp = 0;
-               return 0;
-       }
-
-       id = libunwind__arch_reg_id(regnum);
-       if (id < 0)
-               return -EINVAL;
-
-       ret = perf_reg_value(&val, &ui->sample->user_regs, id);
-       if (ret) {
-               pr_err("unwind: can't read reg %d\n", regnum);
-               return ret;
-       }
-
-       *valp = (unw_word_t) val;
-       pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp);
-       return 0;
-}
-
-static void put_unwind_info(unw_addr_space_t __maybe_unused as,
-                           unw_proc_info_t *pi __maybe_unused,
-                           void *arg __maybe_unused)
-{
-       pr_debug("unwind: put_unwind_info called\n");
-}
-
-static int entry(u64 ip, struct thread *thread,
-                unwind_entry_cb_t cb, void *arg)
-{
-       struct unwind_entry e;
-       struct addr_location al;
-
-       thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
-                                  MAP__FUNCTION, ip, &al);
-
-       e.ip = ip;
-       e.map = al.map;
-       e.sym = al.sym;
-
-       pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
-                al.sym ? al.sym->name : "''",
-                ip,
-                al.map ? al.map->map_ip(al.map, ip) : (u64) 0);
-
-       return cb(&e, arg);
-}
-
-static void display_error(int err)
-{
-       switch (err) {
-       case UNW_EINVAL:
-               pr_err("unwind: Only supports local.\n");
-               break;
-       case UNW_EUNSPEC:
-               pr_err("unwind: Unspecified error.\n");
-               break;
-       case UNW_EBADREG:
-               pr_err("unwind: Register unavailable.\n");
-               break;
-       default:
-               break;
-       }
-}
-
-static unw_accessors_t accessors = {
-       .find_proc_info         = find_proc_info,
-       .put_unwind_info        = put_unwind_info,
-       .get_dyn_info_list_addr = get_dyn_info_list_addr,
-       .access_mem             = access_mem,
-       .access_reg             = access_reg,
-       .access_fpreg           = access_fpreg,
-       .resume                 = resume,
-       .get_proc_name          = get_proc_name,
-};
-
-int unwind__prepare_access(struct thread *thread)
-{
-       if (callchain_param.record_mode != CALLCHAIN_DWARF)
-               return 0;
-
-       thread->addr_space = unw_create_addr_space(&accessors, 0);
-       if (!thread->addr_space) {
-               pr_err("unwind: Can't create unwind address space.\n");
-               return -ENOMEM;
        }
+out_register:
+       unwind__register_ops(thread, ops);
 
-       unw_set_caching_policy(thread->addr_space, UNW_CACHE_GLOBAL);
-       return 0;
+       err = thread->unwind_libunwind_ops->prepare_access(thread);
+       if (initialized)
+               *initialized = err ? false : true;
+       return err;
 }
 
 void unwind__flush_access(struct thread *thread)
 {
-       if (callchain_param.record_mode != CALLCHAIN_DWARF)
-               return;
-
-       unw_flush_cache(thread->addr_space, 0, 0);
+       if (thread->unwind_libunwind_ops)
+               thread->unwind_libunwind_ops->flush_access(thread);
 }
 
 void unwind__finish_access(struct thread *thread)
 {
-       if (callchain_param.record_mode != CALLCHAIN_DWARF)
-               return;
-
-       unw_destroy_addr_space(thread->addr_space);
-}
-
-static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb,
-                      void *arg, int max_stack)
-{
-       u64 val;
-       unw_word_t ips[max_stack];
-       unw_addr_space_t addr_space;
-       unw_cursor_t c;
-       int ret, i = 0;
-
-       ret = perf_reg_value(&val, &ui->sample->user_regs, PERF_REG_IP);
-       if (ret)
-               return ret;
-
-       ips[i++] = (unw_word_t) val;
-
-       /*
-        * If we need more than one entry, do the DWARF
-        * unwind itself.
-        */
-       if (max_stack - 1 > 0) {
-               WARN_ONCE(!ui->thread, "WARNING: ui->thread is NULL");
-               addr_space = ui->thread->addr_space;
-
-               if (addr_space == NULL)
-                       return -1;
-
-               ret = unw_init_remote(&c, addr_space, ui);
-               if (ret)
-                       display_error(ret);
-
-               while (!ret && (unw_step(&c) > 0) && i < max_stack) {
-                       unw_get_reg(&c, UNW_REG_IP, &ips[i]);
-                       ++i;
-               }
-
-               max_stack = i;
-       }
-
-       /*
-        * Display what we got based on the order setup.
-        */
-       for (i = 0; i < max_stack && !ret; i++) {
-               int j = i;
-
-               if (callchain_param.order == ORDER_CALLER)
-                       j = max_stack - i - 1;
-               ret = ips[j] ? entry(ips[j], ui->thread, cb, arg) : 0;
-       }
-
-       return ret;
+       if (thread->unwind_libunwind_ops)
+               thread->unwind_libunwind_ops->finish_access(thread);
 }
 
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
-                       struct thread *thread,
-                       struct perf_sample *data, int max_stack)
+                        struct thread *thread,
+                        struct perf_sample *data, int max_stack)
 {
-       struct unwind_info ui = {
-               .sample       = data,
-               .thread       = thread,
-               .machine      = thread->mg->machine,
-       };
-
-       if (!data->user_regs.regs)
-               return -EINVAL;
-
-       if (max_stack <= 0)
-               return -EINVAL;
-
-       return get_entries(&ui, cb, arg, max_stack);
+       if (thread->unwind_libunwind_ops)
+               return thread->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
+       return 0;
 }
index 12790cf94618eddb9cdcec8607792f57e75a73c8..61fb1e90ff5166c6c1f50f8446650cf302aa610d 100644 (file)
@@ -14,18 +14,42 @@ struct unwind_entry {
 
 typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
 
+struct unwind_libunwind_ops {
+       int (*prepare_access)(struct thread *thread);
+       void (*flush_access)(struct thread *thread);
+       void (*finish_access)(struct thread *thread);
+       int (*get_entries)(unwind_entry_cb_t cb, void *arg,
+                          struct thread *thread,
+                          struct perf_sample *data, int max_stack);
+};
+
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct thread *thread,
                        struct perf_sample *data, int max_stack);
 /* libunwind specific */
 #ifdef HAVE_LIBUNWIND_SUPPORT
-int libunwind__arch_reg_id(int regnum);
-int unwind__prepare_access(struct thread *thread);
+#ifndef LIBUNWIND__ARCH_REG_ID
+#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arch_reg_id(regnum)
+#endif
+
+#ifndef LIBUNWIND__ARCH_REG_SP
+#define LIBUNWIND__ARCH_REG_SP PERF_REG_SP
+#endif
+
+#ifndef LIBUNWIND__ARCH_REG_IP
+#define LIBUNWIND__ARCH_REG_IP PERF_REG_IP
+#endif
+
+int LIBUNWIND__ARCH_REG_ID(int regnum);
+int unwind__prepare_access(struct thread *thread, struct map *map,
+                          bool *initialized);
 void unwind__flush_access(struct thread *thread);
 void unwind__finish_access(struct thread *thread);
 #else
-static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
+static inline int unwind__prepare_access(struct thread *thread __maybe_unused,
+                                        struct map *map __maybe_unused,
+                                        bool *initialized __maybe_unused)
 {
        return 0;
 }
@@ -44,7 +68,9 @@ unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
        return 0;
 }
 
-static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
+static inline int unwind__prepare_access(struct thread *thread __maybe_unused,
+                                        struct map *map __maybe_unused,
+                                        bool *initialized __maybe_unused)
 {
        return 0;
 }
index 23504ad5d6dd2154ca04d63ab4aa97e764de6692..cee559d8c9e8e14ebedbbb5c8de081c00af7f63d 100644 (file)
 #include "callchain.h"
 #include "strlist.h"
 
-struct callchain_param callchain_param = {
-       .mode   = CHAIN_GRAPH_ABS,
-       .min_percent = 0.5,
-       .order  = ORDER_CALLEE,
-       .key    = CCKEY_FUNCTION,
-       .value  = CCVAL_PERCENT,
+#define CALLCHAIN_PARAM_DEFAULT                        \
+       .mode           = CHAIN_GRAPH_ABS,      \
+       .min_percent    = 0.5,                  \
+       .order          = ORDER_CALLEE,         \
+       .key            = CCKEY_FUNCTION,       \
+       .value          = CCVAL_PERCENT,        \
+
+struct callchain_param callchain_param = {
+       CALLCHAIN_PARAM_DEFAULT
+};
+
+struct callchain_param callchain_param_default = {
+       CALLCHAIN_PARAM_DEFAULT
 };
 
 /*
@@ -97,20 +104,17 @@ int rm_rf(char *path)
                scnprintf(namebuf, sizeof(namebuf), "%s/%s",
                          path, d->d_name);
 
-               ret = stat(namebuf, &statbuf);
+               /* We have to check symbolic link itself */
+               ret = lstat(namebuf, &statbuf);
                if (ret < 0) {
                        pr_debug("stat failed: %s\n", namebuf);
                        break;
                }
 
-               if (S_ISREG(statbuf.st_mode))
-                       ret = unlink(namebuf);
-               else if (S_ISDIR(statbuf.st_mode))
+               if (S_ISDIR(statbuf.st_mode))
                        ret = rm_rf(namebuf);
-               else {
-                       pr_debug("unknown file: %s\n", namebuf);
-                       ret = -1;
-               }
+               else
+                       ret = unlink(namebuf);
        }
        closedir(dir);
 
@@ -742,3 +746,19 @@ void print_binary(unsigned char *data, size_t len,
        }
        printer(BINARY_PRINT_DATA_END, -1, extra);
 }
+
+int is_printable_array(char *p, unsigned int len)
+{
+       unsigned int i;
+
+       if (!p || !len || p[len - 1] != 0)
+               return 0;
+
+       len--;
+
+       for (i = 0; i < len; i++) {
+               if (!isprint(p[i]) && !isspace(p[i]))
+                       return 0;
+       }
+       return 1;
+}
index 1e8c3167b9fb3ab070c05974fb028445600c6574..e5f55477491d6c60f02053f9b19ae1ba57eb5b80 100644 (file)
@@ -72,7 +72,6 @@
 #include <sys/ioctl.h>
 #include <inttypes.h>
 #include <linux/kernel.h>
-#include <linux/magic.h>
 #include <linux/types.h>
 #include <sys/ttydefaults.h>
 #include <api/fs/tracing_path.h>
@@ -360,4 +359,10 @@ typedef void (*print_binary_t)(enum binary_printer_ops,
 void print_binary(unsigned char *data, size_t len,
                  size_t bytes_per_line, print_binary_t printer,
                  void *extra);
+
+#if !defined(__GLIBC__) && !defined(__ANDROID__)
+extern int sched_getcpu(void);
+#endif
+
+int is_printable_array(char *p, unsigned int len);
 #endif /* GIT_COMPAT_UTIL_H */
index 44d440da15dcf73f97a81518c97a33be5c32b89b..7bdcad484225f13ed96ff07eb355a45acb997ac5 100644 (file)
@@ -134,8 +134,6 @@ static struct dso *__machine__addnew_vdso(struct machine *machine, const char *s
        return dso;
 }
 
-#if BITS_PER_LONG == 64
-
 static enum dso_type machine__thread_dso_type(struct machine *machine,
                                              struct thread *thread)
 {
@@ -156,6 +154,8 @@ static enum dso_type machine__thread_dso_type(struct machine *machine,
        return dso_type;
 }
 
+#if BITS_PER_LONG == 64
+
 static int vdso__do_copy_compat(FILE *f, int fd)
 {
        char buf[4096];
@@ -283,8 +283,38 @@ static int __machine__findnew_vdso_compat(struct machine *machine,
 
 #endif
 
+static struct dso *machine__find_vdso(struct machine *machine,
+                                     struct thread *thread)
+{
+       struct dso *dso = NULL;
+       enum dso_type dso_type;
+
+       dso_type = machine__thread_dso_type(machine, thread);
+       switch (dso_type) {
+       case DSO__TYPE_32BIT:
+               dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO32, true);
+               if (!dso) {
+                       dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO,
+                                          true);
+                       if (dso && dso_type != dso__type(dso, machine))
+                               dso = NULL;
+               }
+               break;
+       case DSO__TYPE_X32BIT:
+               dso = __dsos__find(&machine->dsos, DSO__NAME_VDSOX32, true);
+               break;
+       case DSO__TYPE_64BIT:
+       case DSO__TYPE_UNKNOWN:
+       default:
+               dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO, true);
+               break;
+       }
+
+       return dso;
+}
+
 struct dso *machine__findnew_vdso(struct machine *machine,
-                                 struct thread *thread __maybe_unused)
+                                 struct thread *thread)
 {
        struct vdso_info *vdso_info;
        struct dso *dso = NULL;
@@ -297,6 +327,10 @@ struct dso *machine__findnew_vdso(struct machine *machine,
        if (!vdso_info)
                goto out_unlock;
 
+       dso = machine__find_vdso(machine, thread);
+       if (dso)
+               goto out_unlock;
+
 #if BITS_PER_LONG == 64
        if (__machine__findnew_vdso_compat(machine, thread, vdso_info, &dso))
                goto out_unlock;
index b7447ceb75e9816509286f711030c0085703fdf2..b0ac057417500d3f7d41b180fe09c459c7cf5de5 100644 (file)
@@ -122,7 +122,7 @@ enum {
        NODE_TAGGED = 2,
 };
 
-#define THRASH_SIZE            1000 * 1000
+#define THRASH_SIZE            (1000 * 1000)
 #define N 127
 #define BATCH  33
 
index b325470c01b3669e38dcdcc13e674b011f97c7b7..1426a9b974944f5c07c4e15b36e90207644b2ea0 100644 (file)
@@ -99,8 +99,9 @@ configfrag_hotplug_cpu () {
 # identify_boot_image qemu-cmd
 #
 # Returns the relative path to the kernel build image.  This will be
-# arch/<arch>/boot/bzImage unless overridden with the TORTURE_BOOT_IMAGE
-# environment variable.
+# arch/<arch>/boot/bzImage or vmlinux if bzImage is not a target for the
+# architecture, unless overridden with the TORTURE_BOOT_IMAGE environment
+# variable.
 identify_boot_image () {
        if test -n "$TORTURE_BOOT_IMAGE"
        then
@@ -110,11 +111,8 @@ identify_boot_image () {
                qemu-system-x86_64|qemu-system-i386)
                        echo arch/x86/boot/bzImage
                        ;;
-               qemu-system-ppc64)
-                       echo arch/powerpc/boot/bzImage
-                       ;;
                *)
-                       echo ""
+                       echo vmlinux
                        ;;
                esac
        fi
@@ -175,7 +173,7 @@ identify_qemu_args () {
        qemu-system-x86_64|qemu-system-i386)
                ;;
        qemu-system-ppc64)
-               echo -enable-kvm -M pseries -cpu POWER7 -nodefaults
+               echo -enable-kvm -M pseries -nodefaults
                echo -device spapr-vscsi
                if test -n "$TORTURE_QEMU_INTERACTIVE" -a -n "$TORTURE_QEMU_MAC"
                then
index 4109f306d855360440ff6fc27037fd2246578789..ea6e373edc27255efe7dccccd86b12b9eb97c189 100755 (executable)
@@ -8,9 +8,9 @@
 #
 # Usage: kvm-test-1-run.sh config builddir resdir seconds qemu-args boot_args
 #
-# qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
-#                      arguments specifying the number of CPUs and other
-#                      options generated from the underlying CPU architecture.
+# qemu-args defaults to "-enable-kvm -nographic", along with arguments
+#                      specifying the number of CPUs and other options
+#                      generated from the underlying CPU architecture.
 # boot_args defaults to value returned by the per_version_boot_params
 #                      shell function.
 #
@@ -96,7 +96,8 @@ if test "$base_resdir" != "$resdir" -a -f $base_resdir/bzImage -a -f $base_resdi
 then
        # Rerunning previous test, so use that test's kernel.
        QEMU="`identify_qemu $base_resdir/vmlinux`"
-       KERNEL=$base_resdir/bzImage
+       BOOT_IMAGE="`identify_boot_image $QEMU`"
+       KERNEL=$base_resdir/${BOOT_IMAGE##*/} # use the last component of ${BOOT_IMAGE}
        ln -s $base_resdir/Make*.out $resdir  # for kvm-recheck.sh
        ln -s $base_resdir/.config $resdir  # for kvm-recheck.sh
 elif kvm-build.sh $config_template $builddir $T
@@ -110,7 +111,7 @@ then
        if test -n "$BOOT_IMAGE"
        then
                cp $builddir/$BOOT_IMAGE $resdir
-               KERNEL=$resdir/bzImage
+               KERNEL=$resdir/${BOOT_IMAGE##*/}
        else
                echo No identifiable boot image, not running KVM, see $resdir.
                echo Do the torture scripts know about your architecture?
@@ -147,7 +148,7 @@ then
 fi
 
 # Generate -smp qemu argument.
-qemu_args="-enable-kvm -soundhw pcspk -nographic $qemu_args"
+qemu_args="-enable-kvm -nographic $qemu_args"
 cpu_count=`configNR_CPUS.sh $config_template`
 cpu_count=`configfrag_boot_cpus "$boot_args" "$config_template" "$cpu_count"`
 vcpus=`identify_qemu_vcpus`
@@ -229,6 +230,7 @@ fi
 if test $commandcompleted -eq 0 -a -n "$qemu_pid"
 then
        echo Grace period for qemu job at pid $qemu_pid
+       oldline="`tail $resdir/console.log`"
        while :
        do
                kruntime=`awk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
@@ -238,13 +240,29 @@ then
                else
                        break
                fi
-               if test $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
+               must_continue=no
+               newline="`tail $resdir/console.log`"
+               if test "$newline" != "$oldline" && echo $newline | grep -q ' [0-9]\+us : '
+               then
+                       must_continue=yes
+               fi
+               last_ts="`tail $resdir/console.log | grep '^\[ *[0-9]\+\.[0-9]\+]' | tail -1 | sed -e 's/^\[ *//' -e 's/\..*$//'`"
+               if test -z "last_ts"
+               then
+                       last_ts=0
+               fi
+               if test "$newline" != "$oldline" -a "$last_ts" -lt $((seconds + $TORTURE_SHUTDOWN_GRACE))
+               then
+                       must_continue=yes
+               fi
+               if test $must_continue = no -a $kruntime -ge $((seconds + $TORTURE_SHUTDOWN_GRACE))
                then
                        echo "!!! PID $qemu_pid hung at $kruntime vs. $seconds seconds" >> $resdir/Warnings 2>&1
                        kill -KILL $qemu_pid
                        break
                fi
-               sleep 1
+               oldline=$newline
+               sleep 10
        done
 elif test -z "$qemu_pid"
 then
index 0d598145873e898d388b6e5d411ddc9b4c937897..0aed965f0062848a0c9899ce94e8687dc3e1b338 100755 (executable)
@@ -48,7 +48,7 @@ resdir=""
 configs=""
 cpus=0
 ds=`date +%Y.%m.%d-%H:%M:%S`
-jitter=0
+jitter="-1"
 
 . functions.sh
 
index 5eb49b7f864c7cfd0ffa84b1f9975ae66d74f14c..08aa7d50ae0e8963fcb7738eee9b28bfe75107e5 100755 (executable)
@@ -33,7 +33,7 @@ if grep -Pq '\x00' < $file
 then
        print_warning Console output contains nul bytes, old qemu still running?
 fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $1.diags
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state|rcu_.*kthread starved for' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $1.diags
 if test -s $1.diags
 then
        print_warning Assertion failure in $file $title
@@ -69,6 +69,11 @@ then
        then
                summary="$summary  Stalls: $n_stalls"
        fi
+       n_starves=`grep -c 'rcu_.*kthread starved for' $1`
+       if test "$n_starves" -ne 0
+       then
+               summary="$summary  Starves: $n_starves"
+       fi
        print_warning Summary: $summary
 else
        rm $1.diags
index 4170e714f04410f0eb5b16b9c92d76add8d9e40f..833f826d6ec2c942a0fbb27a3a6674186c48da1f 100644 (file)
@@ -13,6 +13,22 @@ cd initrd
 cpio -id < /tmp/initrd.img.zcat
 ------------------------------------------------------------------------
 
+Another way to create an initramfs image is using "dracut"[1], which is
+available on many distros.  However, the initramfs dracut generates is a cpio
+archive with another cpio archive in it, so an extra step is needed to create
+the initrd directory hierarchy.
+
+Here are the commands to create an initrd directory for rcutorture using
+dracut:
+
+------------------------------------------------------------------------
+dracut --no-hostonly --no-hostonly-cmdline --module "base bash shutdown" /tmp/initramfs.img
+cd tools/testing/selftests/rcutorture
+mkdir initrd
+cd initrd
+/usr/lib/dracut/skipcpio /tmp/initramfs.img | zcat | cpio -id
+------------------------------------------------------------------------
+
 Interestingly enough, if you are running rcutorture, you don't really
 need userspace in many cases.  Running without userspace has the
 advantage of allowing you to test your kernel independently of the
@@ -89,3 +105,9 @@ while :
 do
        sleep 10
 done
+------------------------------------------------------------------------
+
+References:
+[1]: https://dracut.wiki.kernel.org/index.php/Main_Page
+[2]: http://blog.elastocloud.org/2015/06/rapid-linux-kernel-devtest-with-qemu.html
+[3]: https://www.centos.org/forums/viewtopic.php?t=51621
index c73425de3cfe731c5743e7b0d6aec78d601527b5..4f747ee07f10a93447642ecf9d52e7d5e93f524b 100644 (file)
@@ -4,8 +4,8 @@ include ../lib.mk
 
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
-TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall \
-                       check_initial_reg_state sigreturn ldt_gdt iopl
+TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall test_mremap_vdso \
+                       check_initial_reg_state sigreturn ldt_gdt iopl mpx-mini-test
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
                        test_FCMOV test_FCOMI test_FISTTP \
                        vdso_restorer
diff --git a/tools/testing/selftests/x86/mpx-debug.h b/tools/testing/selftests/x86/mpx-debug.h
new file mode 100644 (file)
index 0000000..9230981
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef _MPX_DEBUG_H
+#define _MPX_DEBUG_H
+
+#ifndef DEBUG_LEVEL
+#define DEBUG_LEVEL 0
+#endif
+#define dprintf_level(level, args...) do { if(level <= DEBUG_LEVEL) printf(args); } while(0)
+#define dprintf1(args...) dprintf_level(1, args)
+#define dprintf2(args...) dprintf_level(2, args)
+#define dprintf3(args...) dprintf_level(3, args)
+#define dprintf4(args...) dprintf_level(4, args)
+#define dprintf5(args...) dprintf_level(5, args)
+
+#endif /* _MPX_DEBUG_H */
diff --git a/tools/testing/selftests/x86/mpx-dig.c b/tools/testing/selftests/x86/mpx-dig.c
new file mode 100644 (file)
index 0000000..ce85356
--- /dev/null
@@ -0,0 +1,498 @@
+/*
+ * Written by Dave Hansen <dave.hansen@intel.com>
+ */
+
+#include <stdlib.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <string.h>
+#include <fcntl.h>
+#include "mpx-debug.h"
+#include "mpx-mm.h"
+#include "mpx-hw.h"
+
+unsigned long bounds_dir_global;
+
+#define mpx_dig_abort()        __mpx_dig_abort(__FILE__, __func__, __LINE__)
+static void inline __mpx_dig_abort(const char *file, const char *func, int line)
+{
+       fprintf(stderr, "MPX dig abort @ %s::%d in %s()\n", file, line, func);
+       printf("MPX dig abort @ %s::%d in %s()\n", file, line, func);
+       abort();
+}
+
+/*
+ * run like this (BDIR finds the probable bounds directory):
+ *
+ *     BDIR="$(cat /proc/$pid/smaps | grep -B1 2097152 \
+ *             | head -1 | awk -F- '{print $1}')";
+ *     ./mpx-dig $pid 0x$BDIR
+ *
+ * NOTE:
+ *     assumes that the only 2097152-kb VMA is the bounds dir
+ */
+
+long nr_incore(void *ptr, unsigned long size_bytes)
+{
+       int i;
+       long ret = 0;
+       long vec_len = size_bytes / PAGE_SIZE;
+       unsigned char *vec = malloc(vec_len);
+       int incore_ret;
+
+       if (!vec)
+               mpx_dig_abort();
+
+       incore_ret = mincore(ptr, size_bytes, vec);
+       if (incore_ret) {
+               printf("mincore ret: %d\n", incore_ret);
+               perror("mincore");
+               mpx_dig_abort();
+       }
+       for (i = 0; i < vec_len; i++)
+               ret += vec[i];
+       free(vec);
+       return ret;
+}
+
+int open_proc(int pid, char *file)
+{
+       static char buf[100];
+       int fd;
+
+       snprintf(&buf[0], sizeof(buf), "/proc/%d/%s", pid, file);
+       fd = open(&buf[0], O_RDONLY);
+       if (fd < 0)
+               perror(buf);
+
+       return fd;
+}
+
+struct vaddr_range {
+       unsigned long start;
+       unsigned long end;
+};
+struct vaddr_range *ranges;
+int nr_ranges_allocated;
+int nr_ranges_populated;
+int last_range = -1;
+
+int __pid_load_vaddrs(int pid)
+{
+       int ret = 0;
+       int proc_maps_fd = open_proc(pid, "maps");
+       char linebuf[10000];
+       unsigned long start;
+       unsigned long end;
+       char rest[1000];
+       FILE *f = fdopen(proc_maps_fd, "r");
+
+       if (!f)
+               mpx_dig_abort();
+       nr_ranges_populated = 0;
+       while (!feof(f)) {
+               char *readret = fgets(linebuf, sizeof(linebuf), f);
+               int parsed;
+
+               if (readret == NULL) {
+                       if (feof(f))
+                               break;
+                       mpx_dig_abort();
+               }
+
+               parsed = sscanf(linebuf, "%lx-%lx%s", &start, &end, rest);
+               if (parsed != 3)
+                       mpx_dig_abort();
+
+               dprintf4("result[%d]: %lx-%lx<->%s\n", parsed, start, end, rest);
+               if (nr_ranges_populated >= nr_ranges_allocated) {
+                       ret = -E2BIG;
+                       break;
+               }
+               ranges[nr_ranges_populated].start = start;
+               ranges[nr_ranges_populated].end = end;
+               nr_ranges_populated++;
+       }
+       last_range = -1;
+       fclose(f);
+       close(proc_maps_fd);
+       return ret;
+}
+
+int pid_load_vaddrs(int pid)
+{
+       int ret;
+
+       dprintf2("%s(%d)\n", __func__, pid);
+       if (!ranges) {
+               nr_ranges_allocated = 4;
+               ranges = malloc(nr_ranges_allocated * sizeof(ranges[0]));
+               dprintf2("%s(%d) allocated %d ranges @ %p\n", __func__, pid,
+                        nr_ranges_allocated, ranges);
+               assert(ranges != NULL);
+       }
+       do {
+               ret = __pid_load_vaddrs(pid);
+               if (!ret)
+                       break;
+               if (ret == -E2BIG) {
+                       dprintf2("%s(%d) need to realloc\n", __func__, pid);
+                       nr_ranges_allocated *= 2;
+                       ranges = realloc(ranges,
+                                       nr_ranges_allocated * sizeof(ranges[0]));
+                       dprintf2("%s(%d) allocated %d ranges @ %p\n", __func__,
+                                       pid, nr_ranges_allocated, ranges);
+                       assert(ranges != NULL);
+                       dprintf1("reallocating to hold %d ranges\n", nr_ranges_allocated);
+               }
+       } while (1);
+
+       dprintf2("%s(%d) done\n", __func__, pid);
+
+       return ret;
+}
+
+static inline int vaddr_in_range(unsigned long vaddr, struct vaddr_range *r)
+{
+       if (vaddr < r->start)
+               return 0;
+       if (vaddr >= r->end)
+               return 0;
+       return 1;
+}
+
+static inline int vaddr_mapped_by_range(unsigned long vaddr)
+{
+       int i;
+
+       if (last_range > 0 && vaddr_in_range(vaddr, &ranges[last_range]))
+               return 1;
+
+       for (i = 0; i < nr_ranges_populated; i++) {
+               struct vaddr_range *r = &ranges[i];
+
+               if (vaddr_in_range(vaddr, r))
+                       continue;
+               last_range = i;
+               return 1;
+       }
+       return 0;
+}
+
+const int bt_entry_size_bytes = sizeof(unsigned long) * 4;
+
+void *read_bounds_table_into_buf(unsigned long table_vaddr)
+{
+#ifdef MPX_DIG_STANDALONE
+       static char bt_buf[MPX_BOUNDS_TABLE_SIZE_BYTES];
+       off_t seek_ret = lseek(fd, table_vaddr, SEEK_SET);
+       if (seek_ret != table_vaddr)
+               mpx_dig_abort();
+
+       int read_ret = read(fd, &bt_buf, sizeof(bt_buf));
+       if (read_ret != sizeof(bt_buf))
+               mpx_dig_abort();
+       return &bt_buf;
+#else
+       return (void *)table_vaddr;
+#endif
+}
+
+int dump_table(unsigned long table_vaddr, unsigned long base_controlled_vaddr,
+               unsigned long bde_vaddr)
+{
+       unsigned long offset_inside_bt;
+       int nr_entries = 0;
+       int do_abort = 0;
+       char *bt_buf;
+
+       dprintf3("%s() base_controlled_vaddr: 0x%012lx bde_vaddr: 0x%012lx\n",
+                       __func__, base_controlled_vaddr, bde_vaddr);
+
+       bt_buf = read_bounds_table_into_buf(table_vaddr);
+
+       dprintf4("%s() read done\n", __func__);
+
+       for (offset_inside_bt = 0;
+            offset_inside_bt < MPX_BOUNDS_TABLE_SIZE_BYTES;
+            offset_inside_bt += bt_entry_size_bytes) {
+               unsigned long bt_entry_index;
+               unsigned long bt_entry_controls;
+               unsigned long this_bt_entry_for_vaddr;
+               unsigned long *bt_entry_buf;
+               int i;
+
+               dprintf4("%s() offset_inside_bt: 0x%lx of 0x%llx\n", __func__,
+                       offset_inside_bt, MPX_BOUNDS_TABLE_SIZE_BYTES);
+               bt_entry_buf = (void *)&bt_buf[offset_inside_bt];
+               if (!bt_buf) {
+                       printf("null bt_buf\n");
+                       mpx_dig_abort();
+               }
+               if (!bt_entry_buf) {
+                       printf("null bt_entry_buf\n");
+                       mpx_dig_abort();
+               }
+               dprintf4("%s() reading *bt_entry_buf @ %p\n", __func__,
+                               bt_entry_buf);
+               if (!bt_entry_buf[0] &&
+                   !bt_entry_buf[1] &&
+                   !bt_entry_buf[2] &&
+                   !bt_entry_buf[3])
+                       continue;
+
+               nr_entries++;
+
+               bt_entry_index = offset_inside_bt/bt_entry_size_bytes;
+               bt_entry_controls = sizeof(void *);
+               this_bt_entry_for_vaddr =
+                       base_controlled_vaddr + bt_entry_index*bt_entry_controls;
+               /*
+                * We sign extend vaddr bits 48->63 which effectively
+                * creates a hole in the virtual address space.
+                * This calculation corrects for the hole.
+                */
+               if (this_bt_entry_for_vaddr > 0x00007fffffffffffUL)
+                       this_bt_entry_for_vaddr |= 0xffff800000000000;
+
+               if (!vaddr_mapped_by_range(this_bt_entry_for_vaddr)) {
+                       printf("bt_entry_buf: %p\n", bt_entry_buf);
+                       printf("there is a bte for %lx but no mapping\n",
+                                       this_bt_entry_for_vaddr);
+                       printf("          bde   vaddr: %016lx\n", bde_vaddr);
+                       printf("base_controlled_vaddr: %016lx\n", base_controlled_vaddr);
+                       printf("          table_vaddr: %016lx\n", table_vaddr);
+                       printf("          entry vaddr: %016lx @ offset %lx\n",
+                               table_vaddr + offset_inside_bt, offset_inside_bt);
+                       do_abort = 1;
+                       mpx_dig_abort();
+               }
+               if (DEBUG_LEVEL < 4)
+                       continue;
+
+               printf("table entry[%lx]: ", offset_inside_bt);
+               for (i = 0; i < bt_entry_size_bytes; i += sizeof(unsigned long))
+                       printf("0x%016lx ", bt_entry_buf[i]);
+               printf("\n");
+       }
+       if (do_abort)
+               mpx_dig_abort();
+       dprintf4("%s() done\n",  __func__);
+       return nr_entries;
+}
+
+int search_bd_buf(char *buf, int len_bytes, unsigned long bd_offset_bytes,
+               int *nr_populated_bdes)
+{
+       unsigned long i;
+       int total_entries = 0;
+
+       dprintf3("%s(%p, %x, %lx, ...) buf end: %p\n", __func__, buf,
+                       len_bytes, bd_offset_bytes, buf + len_bytes);
+
+       for (i = 0; i < len_bytes; i += sizeof(unsigned long)) {
+               unsigned long bd_index = (bd_offset_bytes + i) / sizeof(unsigned long);
+               unsigned long *bounds_dir_entry_ptr = (unsigned long *)&buf[i];
+               unsigned long bounds_dir_entry;
+               unsigned long bd_for_vaddr;
+               unsigned long bt_start;
+               unsigned long bt_tail;
+               int nr_entries;
+
+               dprintf4("%s() loop i: %ld bounds_dir_entry_ptr: %p\n", __func__, i,
+                               bounds_dir_entry_ptr);
+
+               bounds_dir_entry = *bounds_dir_entry_ptr;
+               if (!bounds_dir_entry) {
+                       dprintf4("no bounds dir at index 0x%lx / 0x%lx "
+                                "start at offset:%lx %lx\n", bd_index, bd_index,
+                                       bd_offset_bytes, i);
+                       continue;
+               }
+               dprintf3("found bounds_dir_entry: 0x%lx @ "
+                        "index 0x%lx buf ptr: %p\n", bounds_dir_entry, i,
+                                       &buf[i]);
+               /* mask off the enable bit: */
+               bounds_dir_entry &= ~0x1;
+               (*nr_populated_bdes)++;
+               dprintf4("nr_populated_bdes: %p\n", nr_populated_bdes);
+               dprintf4("*nr_populated_bdes: %d\n", *nr_populated_bdes);
+
+               bt_start = bounds_dir_entry;
+               bt_tail = bounds_dir_entry + MPX_BOUNDS_TABLE_SIZE_BYTES - 1;
+               if (!vaddr_mapped_by_range(bt_start)) {
+                       printf("bounds directory 0x%lx points to nowhere\n",
+                                       bounds_dir_entry);
+                       mpx_dig_abort();
+               }
+               if (!vaddr_mapped_by_range(bt_tail)) {
+                       printf("bounds directory end 0x%lx points to nowhere\n",
+                                       bt_tail);
+                       mpx_dig_abort();
+               }
+               /*
+                * Each bounds directory entry controls 1MB of virtual address
+                * space.  This variable is the virtual address in the process
+                * of the beginning of the area controlled by this bounds_dir.
+                */
+               bd_for_vaddr = bd_index * (1UL<<20);
+
+               nr_entries = dump_table(bounds_dir_entry, bd_for_vaddr,
+                               bounds_dir_global+bd_offset_bytes+i);
+               total_entries += nr_entries;
+               dprintf5("dir entry[%4ld @ %p]: 0x%lx %6d entries "
+                        "total this buf: %7d bd_for_vaddrs: 0x%lx -> 0x%lx\n",
+                               bd_index, buf+i,
+                               bounds_dir_entry, nr_entries, total_entries,
+                               bd_for_vaddr, bd_for_vaddr + (1UL<<20));
+       }
+       dprintf3("%s(%p, %x, %lx, ...) done\n", __func__, buf, len_bytes,
+                       bd_offset_bytes);
+       return total_entries;
+}
+
+int proc_pid_mem_fd = -1;
+
+void *fill_bounds_dir_buf_other(long byte_offset_inside_bounds_dir,
+                          long buffer_size_bytes, void *buffer)
+{
+       unsigned long seekto = bounds_dir_global + byte_offset_inside_bounds_dir;
+       int read_ret;
+       off_t seek_ret = lseek(proc_pid_mem_fd, seekto, SEEK_SET);
+
+       if (seek_ret != seekto)
+               mpx_dig_abort();
+
+       read_ret = read(proc_pid_mem_fd, buffer, buffer_size_bytes);
+       /* there shouldn't practically be short reads of /proc/$pid/mem */
+       if (read_ret != buffer_size_bytes)
+               mpx_dig_abort();
+
+       return buffer;
+}
+void *fill_bounds_dir_buf_self(long byte_offset_inside_bounds_dir,
+                          long buffer_size_bytes, void *buffer)
+
+{
+       unsigned char vec[buffer_size_bytes / PAGE_SIZE];
+       char *dig_bounds_dir_ptr =
+               (void *)(bounds_dir_global + byte_offset_inside_bounds_dir);
+       /*
+        * use mincore() to quickly find the areas of the bounds directory
+        * that have memory and thus will be worth scanning.
+        */
+       int incore_ret;
+
+       int incore = 0;
+       int i;
+
+       dprintf4("%s() dig_bounds_dir_ptr: %p\n", __func__, dig_bounds_dir_ptr);
+
+       incore_ret = mincore(dig_bounds_dir_ptr, buffer_size_bytes, &vec[0]);
+       if (incore_ret) {
+               printf("mincore ret: %d\n", incore_ret);
+               perror("mincore");
+               mpx_dig_abort();
+       }
+       for (i = 0; i < sizeof(vec); i++)
+               incore += vec[i];
+       dprintf4("%s() total incore: %d\n", __func__, incore);
+       if (!incore)
+               return NULL;
+       dprintf3("%s() total incore: %d\n", __func__, incore);
+       return dig_bounds_dir_ptr;
+}
+
+int inspect_pid(int pid)
+{
+       static int dig_nr;
+       long offset_inside_bounds_dir;
+       char bounds_dir_buf[sizeof(unsigned long) * (1UL << 15)];
+       char *dig_bounds_dir_ptr;
+       int total_entries = 0;
+       int nr_populated_bdes = 0;
+       int inspect_self;
+
+       if (getpid() == pid) {
+               dprintf4("inspecting self\n");
+               inspect_self = 1;
+       } else {
+               dprintf4("inspecting pid %d\n", pid);
+               mpx_dig_abort();
+       }
+
+       for (offset_inside_bounds_dir = 0;
+            offset_inside_bounds_dir < MPX_BOUNDS_TABLE_SIZE_BYTES;
+            offset_inside_bounds_dir += sizeof(bounds_dir_buf)) {
+               static int bufs_skipped;
+               int this_entries;
+
+               if (inspect_self) {
+                       dig_bounds_dir_ptr =
+                               fill_bounds_dir_buf_self(offset_inside_bounds_dir,
+                                                        sizeof(bounds_dir_buf),
+                                                        &bounds_dir_buf[0]);
+               } else {
+                       dig_bounds_dir_ptr =
+                               fill_bounds_dir_buf_other(offset_inside_bounds_dir,
+                                                         sizeof(bounds_dir_buf),
+                                                         &bounds_dir_buf[0]);
+               }
+               if (!dig_bounds_dir_ptr) {
+                       bufs_skipped++;
+                       continue;
+               }
+               this_entries = search_bd_buf(dig_bounds_dir_ptr,
+                                       sizeof(bounds_dir_buf),
+                                       offset_inside_bounds_dir,
+                                       &nr_populated_bdes);
+               total_entries += this_entries;
+       }
+       printf("mpx dig (%3d) complete, SUCCESS (%8d / %4d)\n", ++dig_nr,
+                       total_entries, nr_populated_bdes);
+       return total_entries + nr_populated_bdes;
+}
+
+#ifdef MPX_DIG_REMOTE
+int main(int argc, char **argv)
+{
+       int err;
+       char *c;
+       unsigned long bounds_dir_entry;
+       int pid;
+
+       printf("mpx-dig starting...\n");
+       err = sscanf(argv[1], "%d", &pid);
+       printf("parsing: '%s', err: %d\n", argv[1], err);
+       if (err != 1)
+               mpx_dig_abort();
+
+       err = sscanf(argv[2], "%lx", &bounds_dir_global);
+       printf("parsing: '%s': %d\n", argv[2], err);
+       if (err != 1)
+               mpx_dig_abort();
+
+       proc_pid_mem_fd = open_proc(pid, "mem");
+       if (proc_pid_mem_fd < 0)
+               mpx_dig_abort();
+
+       inspect_pid(pid);
+       return 0;
+}
+#endif
+
+long inspect_me(struct mpx_bounds_dir *bounds_dir)
+{
+       int pid = getpid();
+
+       pid_load_vaddrs(pid);
+       bounds_dir_global = (unsigned long)bounds_dir;
+       dprintf4("enter %s() bounds dir: %p\n", __func__, bounds_dir);
+       return inspect_pid(pid);
+}
diff --git a/tools/testing/selftests/x86/mpx-hw.h b/tools/testing/selftests/x86/mpx-hw.h
new file mode 100644 (file)
index 0000000..093c190
--- /dev/null
@@ -0,0 +1,123 @@
+#ifndef _MPX_HW_H
+#define _MPX_HW_H
+
+#include <assert.h>
+
+/* Describe the MPX Hardware Layout in here */
+
+#define NR_MPX_BOUNDS_REGISTERS 4
+
+#ifdef __i386__
+
+#define MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES      16 /* 4 * 32-bits */
+#define MPX_BOUNDS_TABLE_SIZE_BYTES            (1ULL << 14) /* 16k */
+#define MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES                4
+#define MPX_BOUNDS_DIR_SIZE_BYTES              (1ULL << 22) /* 4MB */
+
+#define MPX_BOUNDS_TABLE_BOTTOM_BIT            2
+#define MPX_BOUNDS_TABLE_TOP_BIT               11
+#define MPX_BOUNDS_DIR_BOTTOM_BIT              12
+#define MPX_BOUNDS_DIR_TOP_BIT                 31
+
+#else
+
+/*
+ * Linear Address of "pointer" (LAp)
+ *   0 ->  2: ignored
+ *   3 -> 19: index in to bounds table
+ *  20 -> 47: index in to bounds directory
+ *  48 -> 63: ignored
+ */
+
+#define MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES      32
+#define MPX_BOUNDS_TABLE_SIZE_BYTES            (1ULL << 22) /* 4MB */
+#define MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES                8
+#define MPX_BOUNDS_DIR_SIZE_BYTES              (1ULL << 31) /* 2GB */
+
+#define MPX_BOUNDS_TABLE_BOTTOM_BIT            3
+#define MPX_BOUNDS_TABLE_TOP_BIT               19
+#define MPX_BOUNDS_DIR_BOTTOM_BIT              20
+#define MPX_BOUNDS_DIR_TOP_BIT                 47
+
+#endif
+
+#define MPX_BOUNDS_DIR_NR_ENTRIES      \
+       (MPX_BOUNDS_DIR_SIZE_BYTES/MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES)
+#define MPX_BOUNDS_TABLE_NR_ENTRIES    \
+       (MPX_BOUNDS_TABLE_SIZE_BYTES/MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES)
+
+#define MPX_BOUNDS_TABLE_ENTRY_VALID_BIT       0x1
+
/*
 * One bounds-directory entry: a pointer to a bounds table, with the
 * valid flag in bit 0 (see MPX_BOUNDS_TABLE_ENTRY_VALID_BIT).
 */
struct mpx_bd_entry {
	union {
		char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
		void *contents[1];
	};
} __attribute__((packed));

/* One raw bounds-table entry, viewable as bytes or unsigned longs. */
struct mpx_bt_entry {
	union {
		char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
		unsigned long contents[1];
	};
} __attribute__((packed));

/* The full bounds directory (large, sparse, demand-populated). */
struct mpx_bounds_dir {
	struct mpx_bd_entry entries[MPX_BOUNDS_DIR_NR_ENTRIES];
} __attribute__((packed));

/* One full bounds table. */
struct mpx_bounds_table {
	struct mpx_bt_entry entries[MPX_BOUNDS_TABLE_NR_ENTRIES];
} __attribute__((packed));
+
/*
 * Extract bits [bottombit, topbit] -- both INCLUSIVE -- from 'val'.
 *
 * Fix: the original built the mask from (topbit - bottombit) bits,
 * one short for an inclusive range.  Per the layout comment above,
 * the 64-bit directory index is LAp bits 20->47 = 28 bits, but the
 * old mask kept only 27, silently zeroing the top index bit.
 * Callers never pass a full 64-bit range, so the shift cannot reach
 * the UB case of shifting by 64.
 */
static inline unsigned long GET_BITS(unsigned long val, int bottombit, int topbit)
{
	int total_nr_bits = topbit - bottombit + 1;
	unsigned long mask = (1UL << total_nr_bits) - 1;

	return (val >> bottombit) & mask;
}
+
+static inline unsigned long __vaddr_bounds_table_index(void *vaddr)
+{
+       return GET_BITS((unsigned long)vaddr, MPX_BOUNDS_TABLE_BOTTOM_BIT,
+                                             MPX_BOUNDS_TABLE_TOP_BIT);
+}
+
+static inline unsigned long __vaddr_bounds_directory_index(void *vaddr)
+{
+       return GET_BITS((unsigned long)vaddr, MPX_BOUNDS_DIR_BOTTOM_BIT,
+                                             MPX_BOUNDS_DIR_TOP_BIT);
+}
+
+static inline struct mpx_bd_entry *mpx_vaddr_to_bd_entry(void *vaddr,
+               struct mpx_bounds_dir *bounds_dir)
+{
+       unsigned long index = __vaddr_bounds_directory_index(vaddr);
+       return &bounds_dir->entries[index];
+}
+
+static inline int bd_entry_valid(struct mpx_bd_entry *bounds_dir_entry)
+{
+       unsigned long __bd_entry = (unsigned long)bounds_dir_entry->contents;
+       return (__bd_entry & MPX_BOUNDS_TABLE_ENTRY_VALID_BIT);
+}
+
+static inline struct mpx_bounds_table *
+__bd_entry_to_bounds_table(struct mpx_bd_entry *bounds_dir_entry)
+{
+       unsigned long __bd_entry = (unsigned long)bounds_dir_entry->contents;
+       assert(__bd_entry & MPX_BOUNDS_TABLE_ENTRY_VALID_BIT);
+       __bd_entry &= ~MPX_BOUNDS_TABLE_ENTRY_VALID_BIT;
+       return (struct mpx_bounds_table *)__bd_entry;
+}
+
+static inline struct mpx_bt_entry *
+mpx_vaddr_to_bt_entry(void *vaddr, struct mpx_bounds_dir *bounds_dir)
+{
+       struct mpx_bd_entry *bde = mpx_vaddr_to_bd_entry(vaddr, bounds_dir);
+       struct mpx_bounds_table *bt = __bd_entry_to_bounds_table(bde);
+       unsigned long index = __vaddr_bounds_table_index(vaddr);
+       return &bt->entries[index];
+}
+
+#endif /* _MPX_HW_H */
diff --git a/tools/testing/selftests/x86/mpx-mini-test.c b/tools/testing/selftests/x86/mpx-mini-test.c
new file mode 100644 (file)
index 0000000..616ee96
--- /dev/null
@@ -0,0 +1,1585 @@
+/*
+ * mpx-mini-test.c: routines to test Intel MPX (Memory Protection eXtensions)
+ *
+ * Written by:
+ * "Ren, Qiaowei" <qiaowei.ren@intel.com>
+ * "Wei, Gang" <gang.wei@intel.com>
+ * "Hansen, Dave" <dave.hansen@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2.
+ */
+
+/*
+ * 2014-12-05: Dave Hansen: fixed all of the compiler warnings, and made sure
+ *            it works on 32-bit.
+ */
+
/* Tunables: presumably consumed by the malloc-driven test loop later in
 * this file -- run the expensive checks only every Nth allocation. */
int inspect_every_this_many_mallocs = 100;
int zap_all_every_this_many_mallocs = 1000;
+
+#define _GNU_SOURCE
+#define _LARGEFILE64_SOURCE
+
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <signal.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <ucontext.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "mpx-hw.h"
+#include "mpx-debug.h"
+#include "mpx-mm.h"
+
#ifndef __always_inline
/* Fix: the original definition was missing the closing parenthesis,
 * which broke compilation at every use of __always_inline. */
#define __always_inline inline __attribute__((always_inline))
#endif
+
+#ifndef TEST_DURATION_SECS
+#define TEST_DURATION_SECS 3
+#endif
+
/*
 * Format "<prefix><int_to_write>" and write it to the (pre-existing)
 * file 'file', assert()ing on any failure along the way.
 */
void write_int_to(char *prefix, char *file, int int_to_write)
{
	char msg[100];
	int nchars;
	int rc;
	int fd;

	fd = open(file, O_RDWR);
	assert(fd >= 0);

	nchars = snprintf(msg, sizeof(msg), "%s%d", prefix, int_to_write);
	assert(nchars >= 0);
	assert(nchars < sizeof(msg));

	rc = write(fd, msg, nchars);
	assert(rc == nchars);

	rc = close(fd);
	assert(!rc);
}
+
/* Write "<prefix><pid of this process>" into 'file'. */
void write_pid_to(char *prefix, char *file)
{
	write_int_to(prefix, file, getpid());
}
+
/*
 * Debug helper: point the kernel function tracer at this process and
 * clear the trace buffer.  Requires tracing debugfs to be mounted at
 * the usual location; otherwise write_int_to()'s asserts fire.
 */
void trace_me(void)
{
/* tracing events dir */
#define TED "/sys/kernel/debug/tracing/events/"
/*
	write_pid_to("common_pid=", TED "signal/filter");
	write_pid_to("common_pid=", TED "exceptions/filter");
	write_int_to("", TED "signal/enable", 1);
	write_int_to("", TED "exceptions/enable", 1);
*/
	write_pid_to("", "/sys/kernel/debug/tracing/set_ftrace_pid");
	write_int_to("", "/sys/kernel/debug/tracing/trace", 0);
}
+
/* Report the failing file/line and abort the whole test run. */
#define test_failed() __test_failed(__FILE__, __LINE__)
static void __test_failed(char *f, int l)
{
	fprintf(stderr, "abort @ %s::%d\n", f, l);
	abort();
}
+
+/* Error Printf */
+#define eprintf(args...)       fprintf(stderr, args)
+
#ifdef __i386__

/* i386 directory size is 4MB */
#define REG_IP_IDX	REG_EIP
/* no REX prefix exists on 32-bit; expands to nothing */
#define REX_PREFIX

#define XSAVE_OFFSET_IN_FPMEM	sizeof(struct _libc_fpstate)

/*
 * __cpuid() is from the Linux Kernel:
 */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
		unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile(
		/* save/restore ebx by hand (it may be reserved, e.g. for PIC) */
		"push %%ebx;"
		"cpuid;"
		"mov %%ebx, %1;"
		"pop %%ebx"
		: "=a" (*eax),
		  "=g" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}

#else /* __i386__ */

#define REG_IP_IDX	REG_RIP
/* REX.W byte, spliced into the hand-assembled XSAVE/XRSTOR opcodes */
#define REX_PREFIX "0x48, "

#define XSAVE_OFFSET_IN_FPMEM	0

/*
 * __cpuid() is from the Linux Kernel:
 */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
		unsigned int *ecx, unsigned int *edx)
{
	/* ecx is often an input as well as an output. */
	asm volatile(
		"cpuid;"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
}

#endif /* !__i386__ */
+
/* XSAVE header: xstate_bv flags which state components are present. */
struct xsave_hdr_struct {
	uint64_t xstate_bv;
	uint64_t reserved1[2];
	uint64_t reserved2[5];
} __attribute__((packed));

/* Bound-register images (8 slots of space are reserved here). */
struct bndregs_struct {
	uint64_t bndregs[8];
} __attribute__((packed));

/* BNDCFGU (user config) and BNDSTATUS registers. */
struct bndcsr_struct {
	uint64_t cfg_reg_u;
	uint64_t status_reg;
} __attribute__((packed));

/*
 * Hand-rolled XSAVE area layout.  NOTE(review): assumes the standard
 * (non-compacted) format with MPX state at these fixed offsets --
 * confirm against CPUID leaf 0xd on the machine under test.
 */
struct xsave_struct {
	uint8_t fpu_sse[512];
	struct xsave_hdr_struct xsave_hdr;
	uint8_t ymm[256];
	uint8_t lwp[128];
	struct bndregs_struct bndregs;
	struct bndcsr_struct bndcsr;
} __attribute__((packed));
+
/* 64-byte-aligned scratch XSAVE area plus a typed view of it: */
uint8_t __attribute__((__aligned__(64))) buffer[4096];
struct xsave_struct *xsave_buf = (struct xsave_struct *)buffer;

/* A second, independent scratch XSAVE area: */
uint8_t __attribute__((__aligned__(64))) test_buffer[4096];
struct xsave_struct *xsave_test_buf = (struct xsave_struct *)test_buffer;

/* Running count of bounds-check (#BR) exceptions observed. */
uint64_t num_bnd_chk;
+
/*
 * XRSTOR the state components selected by 'mask' from 'fx'.
 * The opcode is hand-assembled so old toolchains need not know XRSTOR;
 * REX_PREFIX makes it the 64-bit form where applicable.
 */
static __always_inline void xrstor_state(struct xsave_struct *fx, uint64_t mask)
{
	uint32_t lmask = mask;
	uint32_t hmask = mask >> 32;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     :   "memory");
}

/*
 * XSAVE the components selected by 'mask' into '_fx'.
 * NOTE(review): byte-for-byte duplicate of xsave_state() later in this
 * file; the two could be consolidated.
 */
static __always_inline void xsave_state_1(void *_fx, uint64_t mask)
{
	uint32_t lmask = mask;
	uint32_t hmask = mask >> 32;
	unsigned char *fx = _fx;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     :   "memory");
}

/* Read extended control register 'index' via XGETBV (index 0 == XCR0). */
static inline uint64_t xgetbv(uint32_t index)
{
	uint32_t eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	/* edx:eax -> 64 bits (the '+' is effectively a bitwise or here) */
	return eax + ((uint64_t)edx << 32);
}
+
/*
 * Fish BNDSTATUS out of a signal frame: copy the XSAVE image that sits
 * XSAVE_OFFSET_IN_FPMEM bytes past the legacy FP state into 'buffer'
 * and read the BNDCSR status register from the typed view.
 */
static uint64_t read_mpx_status_sig(ucontext_t *uctxt)
{
	memset(buffer, 0, sizeof(buffer));
	memcpy(buffer,
		(uint8_t *)uctxt->uc_mcontext.fpregs + XSAVE_OFFSET_IN_FPMEM,
		sizeof(struct xsave_struct));

	return xsave_buf->bndcsr.status_reg;
}
+
+#include <pthread.h>
+
/*
 * Minimal x86 decoder for the MPX instructions this test emits: given
 * the address of a faulting instruction, return the address of the
 * following instruction so the #BR handler can skip it.  Returns
 * 'addr' unchanged if the bytes do not look like an 0x0f-map (MPX)
 * instruction.  Only mod/rm + SIB + displacement forms are handled;
 * MPX instructions carry no immediates.
 */
static uint8_t *get_next_inst_ip(uint8_t *addr)
{
	uint8_t *ip = addr;
	uint8_t sib;
	uint8_t rm;
	uint8_t mod;
	uint8_t base;
	uint8_t modrm;

	/* determine the prefix. */
	switch(*ip) {
	case 0xf2:
	case 0xf3:
	case 0x66:
		ip++;
		break;
	}

	/* look for rex prefix */
	if ((*ip & 0x40) == 0x40)
		ip++;

	/* Make sure we have a MPX instruction. */
	if (*ip++ != 0x0f)
		return addr;

	/* Skip the op code byte. */
	ip++;

	/* Get the modrm byte. */
	modrm = *ip++;

	/* Break it down into parts. */
	rm = modrm & 7;
	mod = (modrm >> 6);

	/* Init the parts of the address mode. */
	base = 8;

	/* Is it a mem mode? */
	if (mod != 3) {
		/* look for scaled indexed addressing */
		if (rm == 4) {
			/* SIB addressing */
			sib = *ip++;
			base = sib & 7;
			switch (mod) {
			case 0:
				/* base == 5 means no base, disp32 follows */
				if (base == 5)
					ip += 4;
				break;

			case 1:
				/* disp8 */
				ip++;
				break;

			case 2:
				/* disp32 */
				ip += 4;
				break;
			}

		} else {
			/* MODRM addressing */
			switch (mod) {
			case 0:
				/* DISP32 addressing, no base */
				if (rm == 5)
					ip += 4;
				break;

			case 1:
				/* disp8 */
				ip++;
				break;

			case 2:
				/* disp32 */
				ip += 4;
				break;
			}
		}
	}
	return ip;
}
+
+#ifdef si_lower
+static inline void *__si_bounds_lower(siginfo_t *si)
+{
+       return si->si_lower;
+}
+
+static inline void *__si_bounds_upper(siginfo_t *si)
+{
+       return si->si_upper;
+}
+#else
+static inline void **__si_bounds_hack(siginfo_t *si)
+{
+       void *sigfault = &si->_sifields._sigfault;
+       void *end_sigfault = sigfault + sizeof(si->_sifields._sigfault);
+       void **__si_lower = end_sigfault;
+
+       return __si_lower;
+}
+
+static inline void *__si_bounds_lower(siginfo_t *si)
+{
+       return *__si_bounds_hack(si);
+}
+
+static inline void *__si_bounds_upper(siginfo_t *si)
+{
+       return (*__si_bounds_hack(si)) + sizeof(void *);
+}
+#endif
+
static int br_count;			/* total #BR exceptions handled */
static int expected_bnd_index = -1;	/* bnd register the next #BR should implicate */
uint64_t shadow_plb[NR_MPX_BOUNDS_REGISTERS][2]; /* shadow MPX bound registers */
unsigned long shadow_map[NR_MPX_BOUNDS_REGISTERS];
+
+/*
+ * The kernel is supposed to provide some information about the bounds
+ * exception in the siginfo.  It should match what we have in the bounds
+ * registers that we are checking against.  Just check against the shadow copy
+ * since it is easily available, and we also check that *it* matches the real
+ * registers.
+ */
+void check_siginfo_vs_shadow(siginfo_t* si)
+{
+       int siginfo_ok = 1;
+       void *shadow_lower = (void *)(unsigned long)shadow_plb[expected_bnd_index][0];
+       void *shadow_upper = (void *)(unsigned long)shadow_plb[expected_bnd_index][1];
+
+       if ((expected_bnd_index < 0) ||
+           (expected_bnd_index >= NR_MPX_BOUNDS_REGISTERS)) {
+               fprintf(stderr, "ERROR: invalid expected_bnd_index: %d\n",
+                       expected_bnd_index);
+               exit(6);
+       }
+       if (__si_bounds_lower(si) != shadow_lower)
+               siginfo_ok = 0;
+       if (__si_bounds_upper(si) != shadow_upper)
+               siginfo_ok = 0;
+
+       if (!siginfo_ok) {
+               fprintf(stderr, "ERROR: siginfo bounds do not match "
+                       "shadow bounds for register %d\n", expected_bnd_index);
+               exit(7);
+       }
+}
+
/*
 * SIGSEGV handler for the whole test.  Trap 5 (#BR) is the interesting
 * case: count it, cross-check siginfo against the shadow registers and
 * step the faulting RIP/EIP past the MPX instruction.  Anything else
 * (notably trap 14, page fault) is a test failure.
 */
void handler(int signum, siginfo_t *si, void *vucontext)
{
	int i;
	ucontext_t *uctxt = vucontext;
	int trapno;
	unsigned long ip;

	dprintf1("entered signal handler\n");

	trapno = uctxt->uc_mcontext.gregs[REG_TRAPNO];
	ip = uctxt->uc_mcontext.gregs[REG_IP_IDX];

	if (trapno == 5) {	/* 5 == X86_TRAP_BR */
		typeof(si->si_addr) *si_addr_ptr = &si->si_addr;
		uint64_t status = read_mpx_status_sig(uctxt);
		uint64_t br_reason =  status & 0x3;

		br_count++;
		dprintf1("#BR 0x%jx (total seen: %d)\n", status, br_count);

/* local fallbacks in case the libc headers lack these: */
#define __SI_FAULT      (3 << 16)
#define SEGV_BNDERR     (__SI_FAULT|3)  /* failed address bound checks */

		dprintf2("Saw a #BR! status 0x%jx at %016lx br_reason: %jx\n",
				status, ip, br_reason);
		dprintf2("si_signo: %d\n", si->si_signo);
		dprintf2("  signum: %d\n", signum);
		dprintf2("info->si_code == SEGV_BNDERR: %d\n",
				(si->si_code == SEGV_BNDERR));
		dprintf2("info->si_code: %d\n", si->si_code);
		dprintf2("info->si_lower: %p\n", __si_bounds_lower(si));
		dprintf2("info->si_upper: %p\n", __si_bounds_upper(si));

		check_siginfo_vs_shadow(si);

		for (i = 0; i < 8; i++)
			dprintf3("[%d]: %p\n", i, si_addr_ptr[i]);
		switch (br_reason) {
		case 0: /* traditional BR */
			fprintf(stderr,
				"Undefined status with bound exception:%jx\n",
				 status);
			exit(5);
		case 1: /* #BR MPX bounds exception */
			/* these are normal and we expect to see them */
			dprintf1("bounds exception (normal): status 0x%jx at %p si_addr: %p\n",
				status, (void *)ip, si->si_addr);
			num_bnd_chk++;
			/* skip the faulting instruction and resume */
			uctxt->uc_mcontext.gregs[REG_IP_IDX] =
				(greg_t)get_next_inst_ip((uint8_t *)ip);
			break;
		case 2:
			fprintf(stderr, "#BR status == 2, missing bounds table,"
					"kernel should have handled!!\n");
			exit(4);
			break;
		default:
			fprintf(stderr, "bound check error: status 0x%jx at %p\n",
				status, (void *)ip);
			num_bnd_chk++;
			uctxt->uc_mcontext.gregs[REG_IP_IDX] =
				(greg_t)get_next_inst_ip((uint8_t *)ip);
			fprintf(stderr, "bound check error: si_addr %p\n", si->si_addr);
			exit(3);
		}
	} else if (trapno == 14) {
		eprintf("ERROR: In signal handler, page fault, trapno = %d, ip = %016lx\n",
			trapno, ip);
		eprintf("si_addr %p\n", si->si_addr);
		eprintf("REG_ERR: %lx\n", (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
		test_failed();
	} else {
		eprintf("unexpected trap %d! at 0x%lx\n", trapno, ip);
		eprintf("si_addr %p\n", si->si_addr);
		eprintf("REG_ERR: %lx\n", (unsigned long)uctxt->uc_mcontext.gregs[REG_ERR]);
		test_failed();
	}
}
+
/*
 * Run CPUID with leaf 'op' and subleaf 'count'; all four registers are
 * returned through the pointers (*eax/*ecx also carry the inputs in).
 */
static inline void cpuid_count(unsigned int op, int count,
			       unsigned int *eax, unsigned int *ebx,
			       unsigned int *ecx, unsigned int *edx)
{
	*eax = op;
	*ecx = count;
	__cpuid(eax, ebx, ecx, edx);
}
+
/* CPUID leaf enumerating XSAVE features: */
#define XSTATE_CPUID		0x0000000d

/*
 * List of XSAVE features Linux knows about:
 */
enum xfeature_bit {
	XSTATE_BIT_FP,
	XSTATE_BIT_SSE,
	XSTATE_BIT_YMM,
	XSTATE_BIT_BNDREGS,
	XSTATE_BIT_BNDCSR,
	XSTATE_BIT_OPMASK,
	XSTATE_BIT_ZMM_Hi256,
	XSTATE_BIT_Hi16_ZMM,

	XFEATURES_NR_MAX,
};

/* Mask form of each feature bit above: */
#define XSTATE_FP		(1 << XSTATE_BIT_FP)
#define XSTATE_SSE		(1 << XSTATE_BIT_SSE)
#define XSTATE_YMM		(1 << XSTATE_BIT_YMM)
#define XSTATE_BNDREGS		(1 << XSTATE_BIT_BNDREGS)
#define XSTATE_BNDCSR		(1 << XSTATE_BIT_BNDCSR)
#define XSTATE_OPMASK		(1 << XSTATE_BIT_OPMASK)
#define XSTATE_ZMM_Hi256	(1 << XSTATE_BIT_ZMM_Hi256)
#define XSTATE_Hi16_ZMM		(1 << XSTATE_BIT_Hi16_ZMM)

/* Both MPX state components together: */
#define MPX_XSTATES		(XSTATE_BNDREGS | XSTATE_BNDCSR) /* 0x18 */
+
/* Return true iff bit number 'bit' is set in 'x'. */
bool one_bit(unsigned int x, int bit)
{
	return (x >> bit) & 1;
}
+
/*
 * Print the size/user/supervisor/alignment attributes of one XSAVE
 * state component, read from CPUID leaf 0xd, subleaf 'state_bit_nr'.
 */
void print_state_component(int state_bit_nr, char *name)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int state_component_size;
	unsigned int state_component_supervisor;
	unsigned int state_component_user;
	unsigned int state_component_aligned;

	/* See SDM Section 13.2 */
	cpuid_count(XSTATE_CPUID, state_bit_nr, &eax, &ebx, &ecx, &edx);
	/* an all-zero subleaf means the component does not exist */
	assert(eax || ebx || ecx);
	state_component_size = eax;
	state_component_supervisor = ((!ebx) && one_bit(ecx, 0));
	state_component_user = !one_bit(ecx, 0);
	state_component_aligned = one_bit(ecx, 1);
	printf("%8s: size: %d user: %d supervisor: %d aligned: %d\n",
		name,
		state_component_size,	    state_component_user,
		state_component_supervisor, state_component_aligned);

}
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx) */
+#define XSAVE_FEATURE_BIT       (26)  /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define OSXSAVE_FEATURE_BIT     (27) /* XSAVE enabled in the OS */
+
/*
 * Verify the CPU and OS can run the MPX tests: XSAVE + OSXSAVE present,
 * the XSTATE CPUID leaf exists, and both MPX state components are
 * supported by the processor and enabled in XCR0.  exit(0) -- i.e.
 * "skip", not "fail" -- when any prerequisite is missing.
 */
bool check_mpx_support(void)
{
	unsigned int eax, ebx, ecx, edx;

	cpuid_count(1, 0, &eax, &ebx, &ecx, &edx);

	/* We can't do much without XSAVE, so just make these assert()'s */
	if (!one_bit(ecx, XSAVE_FEATURE_BIT)) {
		fprintf(stderr, "processor lacks XSAVE, can not run MPX tests\n");
		exit(0);
	}

	if (!one_bit(ecx, OSXSAVE_FEATURE_BIT)) {
		fprintf(stderr, "processor lacks OSXSAVE, can not run MPX tests\n");
		exit(0);
	}

	/* CPUs not supporting the XSTATE CPUID leaf do not support MPX */
	/* Is this redundant with the feature bit checks? */
	cpuid_count(0, 0, &eax, &ebx, &ecx, &edx);
	if (eax < XSTATE_CPUID) {
		fprintf(stderr, "processor lacks XSTATE CPUID leaf,"
				" can not run MPX tests\n");
		exit(0);
	}

	printf("XSAVE is supported by HW & OS\n");

	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);

	printf("XSAVE processor supported state mask: 0x%x\n", eax);
	printf("XSAVE OS supported state mask: 0x%jx\n", xgetbv(0));

	/* Make sure the processor supports the MPX states (CPUID mask) */
	if ((eax & MPX_XSTATES) != MPX_XSTATES) {
		fprintf(stderr, "processor lacks MPX XSTATE(s), can not run MPX tests\n");
		exit(0);
	}

	/* Make sure the OS has enabled the MPX states in XCR0 */
	if ((xgetbv(0) & MPX_XSTATES) != MPX_XSTATES) {
		fprintf(stderr, "MPX XSTATE(s) no enabled in XCR0, "
				"can not run MPX tests\n");
		exit(0);
	}

	print_state_component(XSTATE_BIT_BNDREGS, "BNDREGS");
	print_state_component(XSTATE_BIT_BNDCSR,  "BNDCSR");

	return true;
}
+
/*
 * Enable MPX in userspace: build an XSAVE image whose BNDCSR component
 * has BNDCFGU pointing at 'l1base' with the enable bit (bit 0) set,
 * then XRSTOR it.  0x18 == XSTATE_BNDREGS|XSTATE_BNDCSR; 0x10 marks
 * only BNDCSR present in xstate_bv.
 */
void enable_mpx(void *l1base)
{
	/* enable point lookup */
	memset(buffer, 0, sizeof(buffer));
	xrstor_state(xsave_buf, 0x18);

	xsave_buf->xsave_hdr.xstate_bv = 0x10;
	xsave_buf->bndcsr.cfg_reg_u = (unsigned long)l1base | 1;
	xsave_buf->bndcsr.status_reg = 0;

	dprintf2("bf xrstor\n");
	dprintf2("xsave cndcsr: status %jx, configu %jx\n",
	       xsave_buf->bndcsr.status_reg, xsave_buf->bndcsr.cfg_reg_u);
	xrstor_state(xsave_buf, 0x18);
	dprintf2("after xrstor\n");

	/* read the state back so the dprintf below shows what stuck */
	xsave_state_1(xsave_buf, 0x18);

	dprintf1("xsave bndcsr: status %jx, configu %jx\n",
	       xsave_buf->bndcsr.status_reg, xsave_buf->bndcsr.cfg_reg_u);
}
+
+#include <sys/prctl.h>
+
/* This process's bounds directory, set by process_specific_init(). */
struct mpx_bounds_dir *bounds_dir_ptr;

/*
 * Count of bounds-directory pages currently resident in memory.
 * 'func'/'line' are unused; kept so the bd_incore() macro can pass the
 * call site for debugging.
 */
unsigned long __bd_incore(const char *func, int line)
{
	unsigned long ret = nr_incore(bounds_dir_ptr, MPX_BOUNDS_DIR_SIZE_BYTES);
	return ret;
}
#define bd_incore() __bd_incore(__func__, __LINE__)
+
/*
 * Assert that the 'sz' bytes starting at 'ptr' are all zero, scanning
 * one unsigned long at a time.
 */
void check_clear(void *ptr, unsigned long sz)
{
	unsigned long *word = ptr;

	while ((void *)word < ptr + sz) {
		if (*word) {
			dprintf1("%p is NOT clear at %p\n", ptr, word);
			assert(0);
		}
		word++;
	}
	dprintf1("%p is clear for %lx\n", ptr, sz);
}
+
+void check_clear_bd(void)
+{
+       check_clear(bounds_dir_ptr, 2UL << 30);
+}
+
+#define USE_MALLOC_FOR_BOUNDS_DIR 1
+bool process_specific_init(void)
+{
+       unsigned long size;
+       unsigned long *dir;
+       /* Guarantee we have the space to align it, add padding: */
+       unsigned long pad = getpagesize();
+
+       size = 2UL << 30; /* 2GB */
+       if (sizeof(unsigned long) == 4)
+               size = 4UL << 20; /* 4MB */
+       dprintf1("trying to allocate %ld MB bounds directory\n", (size >> 20));
+
+       if (USE_MALLOC_FOR_BOUNDS_DIR) {
+               unsigned long _dir;
+
+               dir = malloc(size + pad);
+               assert(dir);
+               _dir = (unsigned long)dir;
+               _dir += 0xfffUL;
+               _dir &= ~0xfffUL;
+               dir = (void *)_dir;
+       } else {
+               /*
+                * This makes debugging easier because the address
+                * calculations are simpler:
+                */
+               dir = mmap((void *)0x200000000000, size + pad,
+                               PROT_READ|PROT_WRITE,
+                               MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+               if (dir == (void *)-1) {
+                       perror("unable to allocate bounds directory");
+                       abort();
+               }
+               check_clear(dir, size);
+       }
+       bounds_dir_ptr = (void *)dir;
+       madvise(bounds_dir_ptr, size, MADV_NOHUGEPAGE);
+       bd_incore();
+       dprintf1("bounds directory: 0x%p -> 0x%p\n", bounds_dir_ptr,
+                       (char *)bounds_dir_ptr + size);
+       check_clear(dir, size);
+       enable_mpx(dir);
+       check_clear(dir, size);
+       if (prctl(43, 0, 0, 0, 0)) {
+               printf("no MPX support\n");
+               abort();
+               return false;
+       }
+       return true;
+}
+
/*
 * Disable kernel MPX management for this process.
 * 44 == PR_MPX_DISABLE_MANAGEMENT -- TODO confirm vs <linux/prctl.h>.
 */
bool process_specific_finish(void)
{
	if (prctl(44)) {
		printf("no MPX support\n");
		return false;
	}
	return true;
}
+
+void setup_handler()
+{
+       int r, rs;
+       struct sigaction newact;
+       struct sigaction oldact;
+
+       /* #BR is mapped to sigsegv */
+       int signum  = SIGSEGV;
+
+       newact.sa_handler = 0;   /* void(*)(int)*/
+       newact.sa_sigaction = handler; /* void (*)(int, siginfo_t*, void *) */
+
+       /*sigset_t - signals to block while in the handler */
+       /* get the old signal mask. */
+       rs = sigprocmask(SIG_SETMASK, 0, &newact.sa_mask);
+       assert(rs == 0);
+
+       /* call sa_sigaction, not sa_handler*/
+       newact.sa_flags = SA_SIGINFO;
+
+       newact.sa_restorer = 0;  /* void(*)(), obsolete */
+       r = sigaction(signum, &newact, &oldact);
+       assert(r == 0);
+}
+
/* One-time MPX test setup: install the #BR handler, then enable MPX. */
void mpx_prepare(void)
{
	dprintf2("%s()\n", __func__);
	setup_handler();
	process_specific_init();
}
+
/* Final report (#BR count) and MPX teardown. */
void mpx_cleanup(void)
{
	printf("%s(): %jd BRs. bye...\n", __func__, num_bnd_chk);
	process_specific_finish();
}
+
+/*-------------- the following is test case ---------------*/
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <time.h>
+
/* #BRs the shadow registers attribute to lower/upper bound violations: */
uint64_t num_lower_brs;
uint64_t num_upper_brs;
+
+#define MPX_CONFIG_OFFSET 1024
+#define MPX_BOUNDS_OFFSET 960
+#define MPX_HEADER_OFFSET 512
+#define MAX_ADDR_TESTED (1<<28)
+#define TEST_ROUNDS 100
+
+/*
+      0F 1A /r BNDLDX-Load
+      0F 1B /r BNDSTX-Store Extended Bounds Using Address Translation
+   66 0F 1A /r BNDMOV bnd1, bnd2/m128
+   66 0F 1B /r BNDMOV bnd1/m128, bnd2
+   F2 0F 1A /r BNDCU bnd, r/m64
+   F2 0F 1B /r BNDCN bnd, r/m64
+   F3 0F 1A /r BNDCL bnd, r/m64
+   F3 0F 1B /r BNDMK bnd, m64
+*/
+
/*
 * XSAVE the components selected by 'mask' into '_fx'.
 * NOTE(review): byte-for-byte duplicate of xsave_state_1() earlier in
 * this file; consider consolidating.
 */
static __always_inline void xsave_state(void *_fx, uint64_t mask)
{
	uint32_t lmask = mask;
	uint32_t hmask = mask >> 32;
	unsigned char *fx = _fx;

	asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x27\n\t"
		     : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		     :   "memory");
}
+
/* Reset bnd0 via BNDMK with base NULL and "size" 0 (upper = 0-1). */
static __always_inline void mpx_clear_bnd0(void)
{
	long size = 0;
	void *ptr = NULL;
	/* F3 0F 1B /r BNDMK bnd, m64			*/
	/* f3 0f 1b 04 11    bndmk  (%rcx,%rdx,1),%bnd0 */
	asm volatile(".byte 0xf3,0x0f,0x1b,0x04,0x11\n\t"
		     : : "c" (ptr), "d" (size-1)
		     :   "memory");
}

/* BNDMK: bnd0 covers [ptr, ptr + size - 1]. */
static __always_inline void mpx_make_bound_helper(unsigned long ptr,
		unsigned long size)
{
	/* F3 0F 1B /r		BNDMK bnd, m64			*/
	/* f3 0f 1b 04 11	bndmk  (%rcx,%rdx,1),%bnd0	*/
	asm volatile(".byte 0xf3,0x0f,0x1b,0x04,0x11\n\t"
		     : : "c" (ptr), "d" (size-1)
		     :   "memory");
}

/* BNDCL: raise #BR if 'ptr' is below bnd0's lower bound. */
static __always_inline void mpx_check_lowerbound_helper(unsigned long ptr)
{
	/* F3 0F 1A /r	BNDCL bnd, r/m64		*/
	/* f3 0f 1a 01	bndcl  (%rcx),%bnd0		*/
	asm volatile(".byte 0xf3,0x0f,0x1a,0x01\n\t"
		     : : "c" (ptr)
		     :   "memory");
}

/* BNDCU: raise #BR if 'ptr' is above bnd0's upper bound. */
static __always_inline void mpx_check_upperbound_helper(unsigned long ptr)
{
	/* F2 0F 1A /r	BNDCU bnd, r/m64	*/
	/* f2 0f 1a 01	bndcu  (%rcx),%bnd0	*/
	asm volatile(".byte 0xf2,0x0f,0x1a,0x01\n\t"
		     : : "c" (ptr)
		     :   "memory");
}

/* BNDMOV register-to-register: copy bnd0 into bnd2. */
static __always_inline void mpx_movbndreg_helper()
{
	/* 66 0F 1B /r	BNDMOV bnd1/m128, bnd2	*/
	/* 66 0f 1b c2	bndmov %bnd0,%bnd2	*/

	asm volatile(".byte 0x66,0x0f,0x1b,0xc2\n\t");
}

/* BNDMOV: spill bnd0 to the 128-bit slot at 'mem'. */
static __always_inline void mpx_movbnd2mem_helper(uint8_t *mem)
{
	/* 66 0F 1B /r	BNDMOV bnd1/m128, bnd2	*/
	/* 66 0f 1b 01	bndmov %bnd0,(%rcx)	*/
	asm volatile(".byte 0x66,0x0f,0x1b,0x01\n\t"
		     : : "c" (mem)
		     :   "memory");
}

/* BNDMOV: reload bnd0 from the 128-bit slot at 'mem'. */
static __always_inline void mpx_movbnd_from_mem_helper(uint8_t *mem)
{
	/* 66 0F 1A /r	BNDMOV bnd1, bnd2/m128	*/
	/* 66 0f 1a 01	bndmov (%rcx),%bnd0	*/
	asm volatile(".byte 0x66,0x0f,0x1a,0x01\n\t"
		     : : "c" (mem)
		     :   "memory");
}

/* BNDSTX: store bnd0 for pointer value 'ptr_val' at address 'ptr_addr'. */
static __always_inline void mpx_store_dsc_helper(unsigned long ptr_addr,
		unsigned long ptr_val)
{
	/* 0F 1B /r	BNDSTX-Store Extended Bounds Using Address Translation	*/
	/* 0f 1b 04 11	bndstx %bnd0,(%rcx,%rdx,1)				*/
	asm volatile(".byte 0x0f,0x1b,0x04,0x11\n\t"
		     : : "c" (ptr_addr), "d" (ptr_val)
		     :   "memory");
}

/* BNDLDX: load bnd0 for pointer value 'ptr_val' from address 'ptr_addr'. */
static __always_inline void mpx_load_dsc_helper(unsigned long ptr_addr,
		unsigned long ptr_val)
{
	/* 0F 1A /r	BNDLDX-Load			*/
	/*/ 0f 1a 04 11	bndldx (%rcx,%rdx,1),%bnd0	*/
	asm volatile(".byte 0x0f,0x1a,0x04,0x11\n\t"
		     : : "c" (ptr_addr), "d" (ptr_val)
		     :   "memory");
}
+
/*
 * Dump the four bound registers (raw, plus the upper bound which the
 * hardware stores one's-complemented) and BNDCFGU/BNDSTATUS from a raw
 * XSAVE image, tagged with the caller's line number.
 */
void __print_context(void *__print_xsave_buffer, int line)
{
	uint64_t *bounds = (uint64_t *)(__print_xsave_buffer + MPX_BOUNDS_OFFSET);
	uint64_t *cfg    = (uint64_t *)(__print_xsave_buffer + MPX_CONFIG_OFFSET);

	int i;
	eprintf("%s()::%d\n", "print_context", line);
	for (i = 0; i < 4; i++) {
		eprintf("bound[%d]: 0x%016lx 0x%016lx(0x%016lx)\n", i,
		       (unsigned long)bounds[i*2],
		       ~(unsigned long)bounds[i*2+1],
			(unsigned long)bounds[i*2+1]);
	}

	eprintf("cpcfg: %jx  cpstatus: %jx\n", cfg[0], cfg[1]);
}
#define print_context(x) __print_context(x, __LINE__)
+#ifdef DEBUG
+#define dprint_context(x) print_context(x)
+#else
+#define dprint_context(x) do{}while(0)
+#endif
+
+void init()
+{
+       int i;
+
+       srand((unsigned int)time(NULL));
+
+       for (i = 0; i < 4; i++) {
+               shadow_plb[i][0] = 0;
+               shadow_plb[i][1] = ~(unsigned long)0;
+       }
+}
+
/*
 * Per-call source of test randomness.  'line' is unused; it exists so
 * the mpx_random() macro can pass __LINE__ for debugging.
 */
long int __mpx_random(int line)
{
#ifdef NOT_SO_RANDOM
	static long fake = 722122311;

	fake += 563792075;
	return fake;	/* fix: was "fakse" -- this branch never compiled */
#else
	return random();
#endif
}
#define mpx_random() __mpx_random(__LINE__)
+
+/*
+ * Pick a pseudo-random address below MAX_ADDR_TESTED and align it down
+ * to pointer size.  The value is used as a pointer for MPX bounds
+ * operations, not dereferenced here.
+ */
+uint8_t *get_random_addr()
+{
+       uint8_t*addr = (uint8_t *)(unsigned long)(rand() % MAX_ADDR_TESTED);
+       return (addr - (unsigned long)addr % sizeof(uint8_t *));
+}
+
+/*
+ * Compare the hardware bounds registers captured in an xsave buffer
+ * against the software shadow copy.  The hardware stores the upper
+ * bound one's-complemented, hence the ~ when comparing/printing.
+ * Returns true when all four registers match the shadow.
+ */
+static inline bool compare_context(void *__xsave_buffer)
+{
+       uint64_t *bounds = (uint64_t *)(__xsave_buffer + MPX_BOUNDS_OFFSET);
+
+       int i;
+       for (i = 0; i < 4; i++) {
+               dprintf3("shadow[%d]{%016lx/%016lx}\nbounds[%d]{%016lx/%016lx}\n",
+                      i, (unsigned long)shadow_plb[i][0], (unsigned long)shadow_plb[i][1],
+                      i, (unsigned long)bounds[i*2],     ~(unsigned long)bounds[i*2+1]);
+               if ((shadow_plb[i][0] != bounds[i*2]) ||
+                   (shadow_plb[i][1] != ~(unsigned long)bounds[i*2+1])) {
+                       eprintf("ERROR comparing shadow to real bound register %d\n", i);
+                       eprintf("shadow{0x%016lx/0x%016lx}\nbounds{0x%016lx/0x%016lx}\n",
+                              (unsigned long)shadow_plb[i][0], (unsigned long)shadow_plb[i][1],
+                              (unsigned long)bounds[i*2], (unsigned long)bounds[i*2+1]);
+                       return false;
+               }
+       }
+
+       return true;
+}
+
+/*
+ * Shadow equivalent of BNDMK: record bounds [ptr, ptr+offset-1] in
+ * shadow register 'index'.
+ */
+void mkbnd_shadow(uint8_t *ptr, int index, long offset)
+{
+       uint64_t *lower = (uint64_t *)&(shadow_plb[index][0]);
+       uint64_t *upper = (uint64_t *)&(shadow_plb[index][1]);
+       *lower = (unsigned long)ptr;
+       *upper = (unsigned long)ptr + offset - 1;
+}
+
+/*
+ * Shadow equivalent of BNDCL: count an expected #BR when ptr is below
+ * the shadow lower bound of register 'index'.
+ */
+void check_lowerbound_shadow(uint8_t *ptr, int index)
+{
+       uint64_t *lower = (uint64_t *)&(shadow_plb[index][0]);
+       if (*lower > (uint64_t)(unsigned long)ptr)
+               num_lower_brs++;
+       else
+               dprintf1("LowerBoundChk passed:%p\n", ptr);
+}
+
+/*
+ * Shadow equivalent of BNDCU: count an expected #BR when ptr is above
+ * the shadow upper bound of register 'index'.
+ */
+void check_upperbound_shadow(uint8_t *ptr, int index)
+{
+       uint64_t upper = *(uint64_t *)&(shadow_plb[index][1]);
+       if (upper < (uint64_t)(unsigned long)ptr)
+               num_upper_brs++;
+       else
+               dprintf1("UpperBoundChk passed:%p\n", ptr);
+}
+
+/* Shadow equivalent of a register-to-register BNDMOV. */
+__always_inline void movbndreg_shadow(int src, int dest)
+{
+       shadow_plb[dest][0] = shadow_plb[src][0];
+       shadow_plb[dest][1] = shadow_plb[src][1];
+}
+
+/*
+ * Shadow equivalent of BNDMOV to memory: write shadow register 'src'
+ * as two consecutive unsigned longs at 'dest'.
+ */
+__always_inline void movbnd2mem_shadow(int src, unsigned long *dest)
+{
+       unsigned long *lower = (unsigned long *)&(shadow_plb[src][0]);
+       unsigned long *upper = (unsigned long *)&(shadow_plb[src][1]);
+       *dest = *lower;
+       *(dest+1) = *upper;
+}
+
+/*
+ * Shadow equivalent of BNDMOV from memory: load shadow register 'dest'
+ * from two consecutive unsigned longs at 'src'.
+ */
+__always_inline void movbnd_from_mem_shadow(unsigned long *src, int dest)
+{
+       unsigned long *lower = (unsigned long *)&(shadow_plb[dest][0]);
+       unsigned long *upper = (unsigned long *)&(shadow_plb[dest][1]);
+       *lower = *src;
+       *upper = *(src+1);
+}
+
+/*
+ * Shadow equivalent of BNDSTX.  Note there is only ONE global 3-slot
+ * shadow_map (bounds pair + pointer value), so only the most recent
+ * store is remembered; 'ptr' (the table key) is deliberately ignored.
+ */
+__always_inline void stdsc_shadow(int index, uint8_t *ptr, uint8_t *ptr_val)
+{
+       shadow_map[0] = (unsigned long)shadow_plb[index][0];
+       shadow_map[1] = (unsigned long)shadow_plb[index][1];
+       shadow_map[2] = (unsigned long)ptr_val;
+       dprintf3("%s(%d, %p, %p) set shadow map[2]: %p\n", __func__,
+                       index, ptr, ptr_val, ptr_val);
+       /*ptr ignored */
+}
+
+/*
+ * Shadow equivalent of BNDLDX.  If the stored pointer value does not
+ * match ptr_val the hardware re-initializes the register, so mirror
+ * that by resetting the shadow to [0, ~0]; otherwise load the stored
+ * bounds pair.
+ */
+void lddsc_shadow(int index, uint8_t *ptr, uint8_t *ptr_val)
+{
+       uint64_t lower = shadow_map[0];
+       uint64_t upper = shadow_map[1];
+       uint8_t *value = (uint8_t *)shadow_map[2];
+
+       if (value != ptr_val) {
+               dprintf2("%s(%d, %p, %p) init shadow bounds[%d] "
+                        "because %p != %p\n", __func__, index, ptr,
+                        ptr_val, index, value, ptr_val);
+               shadow_plb[index][0] = 0;
+               shadow_plb[index][1] = ~(unsigned long)0;
+       } else {
+               shadow_plb[index][0] = lower;
+               shadow_plb[index][1] = upper;
+       }
+       /* ptr ignored */
+}
+
+/*
+ * Test helpers: each mpx_test_helperN() executes real MPX instructions
+ * and the matching mpx_test_helperN_shadow() applies the same effect to
+ * the software shadow state, so hardware and shadow can be compared.
+ */
+static __always_inline void mpx_test_helper0(uint8_t *buf, uint8_t *ptr)
+{
+       mpx_make_bound_helper((unsigned long)ptr, 0x1800);
+}
+
+static __always_inline void mpx_test_helper0_shadow(uint8_t *buf, uint8_t *ptr)
+{
+       mkbnd_shadow(ptr, 0, 0x1800);
+}
+
+static __always_inline void mpx_test_helper1(uint8_t *buf, uint8_t *ptr)
+{
+       /* these are hard-coded to check bnd0 */
+       expected_bnd_index = 0;
+       mpx_check_lowerbound_helper((unsigned long)(ptr-1));
+       mpx_check_upperbound_helper((unsigned long)(ptr+0x1800));
+       /* reset this since we do not expect any more bounds exceptions */
+       expected_bnd_index = -1;
+}
+
+static __always_inline void mpx_test_helper1_shadow(uint8_t *buf, uint8_t *ptr)
+{
+       check_lowerbound_shadow(ptr-1, 0);
+       check_upperbound_shadow(ptr+0x1800, 0);
+}
+
+/* bndmk, register move, store to memory, then clobber bnd0 */
+static __always_inline void mpx_test_helper2(uint8_t *buf, uint8_t *ptr)
+{
+       mpx_make_bound_helper((unsigned long)ptr, 0x1800);
+       mpx_movbndreg_helper();
+       mpx_movbnd2mem_helper(buf);
+       mpx_make_bound_helper((unsigned long)(ptr+0x12), 0x1800);
+}
+
+static __always_inline void mpx_test_helper2_shadow(uint8_t *buf, uint8_t *ptr)
+{
+       mkbnd_shadow(ptr, 0, 0x1800);
+       movbndreg_shadow(0, 2);
+       movbnd2mem_shadow(0, (unsigned long *)buf);
+       mkbnd_shadow(ptr+0x12, 0, 0x1800);
+}
+
+/* reload bnd0 from the memory written by helper2 */
+static __always_inline void mpx_test_helper3(uint8_t *buf, uint8_t *ptr)
+{
+       mpx_movbnd_from_mem_helper(buf);
+}
+
+static __always_inline void mpx_test_helper3_shadow(uint8_t *buf, uint8_t *ptr)
+{
+       movbnd_from_mem_shadow((unsigned long *)buf, 0);
+}
+
+/* bndstx, then clobber bnd0 so helper5's bndldx has work to do */
+static __always_inline void mpx_test_helper4(uint8_t *buf, uint8_t *ptr)
+{
+       mpx_store_dsc_helper((unsigned long)buf, (unsigned long)ptr);
+       mpx_make_bound_helper((unsigned long)(ptr+0x12), 0x1800);
+}
+
+static __always_inline void mpx_test_helper4_shadow(uint8_t *buf, uint8_t *ptr)
+{
+       stdsc_shadow(0, buf, ptr);
+       mkbnd_shadow(ptr+0x12, 0, 0x1800);
+}
+
+/* bndldx of what helper4 stored */
+static __always_inline void mpx_test_helper5(uint8_t *buf, uint8_t *ptr)
+{
+       mpx_load_dsc_helper((unsigned long)buf, (unsigned long)ptr);
+}
+
+static __always_inline void mpx_test_helper5_shadow(uint8_t *buf, uint8_t *ptr)
+{
+       lddsc_shadow(0, buf, ptr);
+}
+
+#define NR_MPX_TEST_FUNCTIONS 6
+
+/*
+ * For compatibility reasons, MPX will clear the bounds registers
+ * when you make function calls (among other things).  We have to
+ * preserve the registers in between calls to the "helpers" since
+ * they build on each other.
+ *
+ * Be very careful not to make any function calls inside the
+ * helpers, or anywhere else between the xrstor and xsave.
+ */
+#define run_helper(helper_nr, buf, buf_shadow, ptr)    do {    \
+       xrstor_state(xsave_test_buf, flags);                    \
+       mpx_test_helper##helper_nr(buf, ptr);                   \
+       xsave_state(xsave_test_buf, flags);                     \
+       mpx_test_helper##helper_nr##_shadow(buf_shadow, ptr);   \
+} while (0)
+
+/*
+ * Dispatch to test helper 'nr'.  run_helper() is a macro taking a
+ * literal helper number, hence the switch.  flags = 0x18 selects the
+ * two MPX xsave state components (bits 3 and 4: BNDREGS and BNDCSR).
+ */
+static void run_helpers(int nr, uint8_t *buf, uint8_t *buf_shadow, uint8_t *ptr)
+{
+       uint64_t flags = 0x18;
+
+       dprint_context(xsave_test_buf);
+       switch (nr) {
+       case 0:
+               run_helper(0, buf, buf_shadow, ptr);
+               break;
+       case 1:
+               run_helper(1, buf, buf_shadow, ptr);
+               break;
+       case 2:
+               run_helper(2, buf, buf_shadow, ptr);
+               break;
+       case 3:
+               run_helper(3, buf, buf_shadow, ptr);
+               break;
+       case 4:
+               run_helper(4, buf, buf_shadow, ptr);
+               break;
+       case 5:
+               run_helper(5, buf, buf_shadow, ptr);
+               break;
+       default:
+               test_failed();
+               break;
+       }
+       dprint_context(xsave_test_buf);
+}
+
+unsigned long buf_shadow[1024]; /* used to check load / store descriptors */
+extern long inspect_me(struct mpx_bounds_dir *bounds_dir);
+
+/*
+ * Sprinkle bounds-table entries over 'buf' by doing bndmk+bndstx at
+ * random pointer-aligned offsets, forcing the kernel to allocate
+ * bounds tables covering the buffer.  Returns how many entries were
+ * written.
+ */
+long cover_buf_with_bt_entries(void *buf, long buf_len)
+{
+       int i;
+       long nr_to_fill;
+       int ratio = 1000;
+       unsigned long buf_len_in_ptrs;
+
+       /* Fill about 1/1000 of the space with bt entries */
+       nr_to_fill = buf_len / (sizeof(unsigned long) * ratio);
+
+       /* NOTE(review): only logs when nr_to_fill is 0 -- possibly meant
+        * to log unconditionally; confirm intent. */
+       if (!nr_to_fill)
+               dprintf3("%s() nr_to_fill: %ld\n", __func__, nr_to_fill);
+
+       /* Align the buffer to pointer size */
+       while (((unsigned long)buf) % sizeof(void *)) {
+               buf++;
+               buf_len--;
+       }
+       /* We are storing pointers, so make the length count pointers */
+       buf_len_in_ptrs = buf_len / sizeof(void *);
+
+       for (i = 0; i < nr_to_fill; i++) {
+               long index = (mpx_random() % buf_len_in_ptrs);
+               void *ptr = buf + index * sizeof(unsigned long);
+               unsigned long ptr_addr = (unsigned long)ptr;
+
+               /* ptr and size can be anything */
+               mpx_make_bound_helper((unsigned long)ptr, 8);
+
+               /*
+                * take bnd0 and put it in to bounds tables "buf + index" is an
+                * address inside the buffer where we are pretending that we
+                * are going to put a pointer We do not, though because we will
+                * never load entries from the table, so it doesn't matter.
+                */
+               mpx_store_dsc_helper(ptr_addr, (unsigned long)ptr);
+               dprintf4("storing bound table entry for %lx (buf start @ %p)\n",
+                               ptr_addr, buf);
+       }
+       return nr_to_fill;
+}
+
+/* Round alignme down to a multiple of align_to (power of two). */
+unsigned long align_down(unsigned long alignme, unsigned long align_to)
+{
+       unsigned long mask = align_to - 1;
+
+       return alignme & ~mask;
+}
+
+/* Round alignme up to a multiple of align_to (power of two). */
+unsigned long align_up(unsigned long alignme, unsigned long align_to)
+{
+       unsigned long mask = align_to - 1;
+
+       return (alignme + mask) & ~mask;
+}
+
+/*
+ * Using 1MB alignment guarantees that no allocation
+ * will overlap with another's bounds tables.
+ *
+ * We have to cook our own allocator here.  malloc() can
+ * mix other allocations with ours which means that even
+ * if we free all of our allocations, there might still
+ * be bounds tables for the *areas* since there is other
+ * valid memory there.
+ *
+ * We also can't use malloc() because a free() of an area
+ * might not free it back to the kernel.  We want it
+ * completely unmapped and malloc() does not guarantee
+ * that.
+ */
+#ifdef __i386__
+long alignment = 4096;
+long sz_alignment = 4096;
+#else
+long alignment = 1 * MB;
+long sz_alignment = 1 * MB;
+#endif
+void *mpx_mini_alloc(unsigned long sz)
+{
+       unsigned long long tries = 0;
+       static void *last;
+       void *ptr;
+       void *try_at;
+
+       sz = align_up(sz, sz_alignment);
+
+       try_at = last + alignment;
+       while (1) {
+               ptr = mmap(try_at, sz, PROT_READ|PROT_WRITE,
+                               MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+               if (ptr == (void *)-1)
+                       return NULL;
+               if (ptr == try_at)
+                       break;
+
+               munmap(ptr, sz);
+               try_at += alignment;
+#ifdef __i386__
+               /*
+                * This isn't quite correct for 32-bit binaries
+                * on 64-bit kernels since they can use the
+                * entire 32-bit address space, but it's close
+                * enough.
+                */
+               if (try_at > (void *)0xC0000000)
+#else
+               if (try_at > (void *)0x0000800000000000)
+#endif
+                       try_at = (void *)0x0;
+               if (!(++tries % 10000))
+                       dprintf1("stuck in %s(), tries: %lld\n", __func__, tries);
+               continue;
+       }
+       last = ptr;
+       dprintf3("mpx_mini_alloc(0x%lx) returning: %p\n", sz, ptr);
+       return ptr;
+}
+/*
+ * Counterpart of mpx_mini_alloc(): munmap() the region, rounding the
+ * size up the same way the allocator did.  The sanity check catches
+ * pointers above the expected user address range.
+ */
+void mpx_mini_free(void *ptr, long sz)
+{
+       dprintf2("%s() ptr: %p\n", __func__, ptr);
+       if ((unsigned long)ptr > 0x100000000000) {
+               dprintf1("uh oh !!!!!!!!!!!!!!! pointer too high: %p\n", ptr);
+               test_failed();
+       }
+       sz = align_up(sz, sz_alignment);
+       dprintf3("%s() ptr: %p before munmap\n", __func__, ptr);
+       munmap(ptr, sz);
+       dprintf3("%s() ptr: %p DONE\n", __func__, ptr);
+}
+
+#define NR_MALLOCS 100
+/* Bookkeeping for one live mpx_mini_alloc() allocation. */
+struct one_malloc {
+       char *ptr;
+       int nr_filled_btes;     /* bt entries written by cover_buf_with_bt_entries() */
+       unsigned long size;
+};
+struct one_malloc mallocs[NR_MALLOCS];
+
+/*
+ * Free slot 'index' if occupied and mark it empty.  Also sanity-check
+ * that the pointer really was 'alignment'-aligned.
+ */
+void free_one_malloc(int index)
+{
+       unsigned long free_ptr;
+       unsigned long mask;
+
+       if (!mallocs[index].ptr)
+               return;
+
+       mpx_mini_free(mallocs[index].ptr, mallocs[index].size);
+       dprintf4("freed[%d]:  %p\n", index, mallocs[index].ptr);
+
+       free_ptr = (unsigned long)mallocs[index].ptr;
+       mask = alignment-1;
+       dprintf4("lowerbits: %lx / %lx mask: %lx\n", free_ptr,
+                       (free_ptr & mask), mask);
+       assert((free_ptr & mask) == 0);
+
+       mallocs[index].ptr = NULL;
+}
+
+#ifdef __i386__
+#define MPX_BOUNDS_TABLE_COVERS 4096
+#else
+#define MPX_BOUNDS_TABLE_COVERS (1 * MB)
+#endif
+/*
+ * Free every tracked allocation and, when our alignment guarantees
+ * each allocation owned its bounds tables outright, verify via
+ * inspect_me() that the kernel freed all of them.
+ */
+void zap_everything(void)
+{
+       long after_zap;
+       long before_zap;
+       int i;
+
+       before_zap = inspect_me(bounds_dir_ptr);
+       dprintf1("zapping everything start: %ld\n", before_zap);
+       for (i = 0; i < NR_MALLOCS; i++)
+               free_one_malloc(i);
+
+       after_zap = inspect_me(bounds_dir_ptr);
+       dprintf1("zapping everything done: %ld\n", after_zap);
+       /*
+        * We only guarantee to empty the thing out if our allocations are
+        * exactly aligned on the boundaries of a bounds table.
+        */
+       if ((alignment >= MPX_BOUNDS_TABLE_COVERS) &&
+           (sz_alignment >= MPX_BOUNDS_TABLE_COVERS)) {
+               if (after_zap != 0)
+                       test_failed();
+
+               assert(after_zap == 0);
+       }
+}
+
+/*
+ * One iteration of the unmap test: randomly free existing allocations
+ * (occasionally zapping everything), then make a new allocation in a
+ * random slot and cover it with bounds-table entries.  Periodically
+ * runs inspect_me() to validate the bounds directory.
+ */
+void do_one_malloc(void)
+{
+       static int malloc_counter;
+       long sz;
+       int rand_index = (mpx_random() % NR_MALLOCS);
+       void *ptr = mallocs[rand_index].ptr;
+
+       dprintf3("%s() enter\n", __func__);
+
+       if (ptr) {
+               dprintf3("freeing one malloc at index: %d\n", rand_index);
+               free_one_malloc(rand_index);
+               /* occasionally free a whole tail of the array too */
+               if (mpx_random() % (NR_MALLOCS*3) == 3) {
+                       int i;
+                       dprintf3("zapping some more\n");
+                       for (i = rand_index; i < NR_MALLOCS; i++)
+                               free_one_malloc(i);
+               }
+               if ((mpx_random() % zap_all_every_this_many_mallocs) == 4)
+                       zap_everything();
+       }
+
+       /* 1->~1M */
+       sz = (1 + mpx_random() % 1000) * 1000;
+       ptr = mpx_mini_alloc(sz);
+       if (!ptr) {
+               /*
+                * If we are failing allocations, just assume we
+                * are out of memory and zap everything.
+                */
+               dprintf3("zapping everything because out of memory\n");
+               zap_everything();
+               goto out;
+       }
+
+       dprintf3("malloc: %p size: 0x%lx\n", ptr, sz);
+       mallocs[rand_index].nr_filled_btes = cover_buf_with_bt_entries(ptr, sz);
+       mallocs[rand_index].ptr = ptr;
+       mallocs[rand_index].size = sz;
+out:
+       if ((++malloc_counter) % inspect_every_this_many_mallocs == 0)
+               inspect_me(bounds_dir_ptr);
+}
+
+/*
+ * Repeatedly call test_func() for TEST_DURATION_SECS seconds, printing
+ * a progress line at most once per second (and once at the end).
+ */
+void run_timed_test(void (*test_func)(void))
+{
+       int done = 0;
+       long iteration = 0;
+       static time_t last_print;
+       time_t now;
+       time_t start;
+
+       time(&start);
+       while (!done) {
+               time(&now);
+               if ((now - start) > TEST_DURATION_SECS)
+                       done = 1;
+
+               test_func();
+               iteration++;
+
+               if ((now - last_print > 1) || done) {
+                       printf("iteration %ld complete, OK so far\n", iteration);
+                       last_print = now;
+               }
+       }
+}
+
+/* Entry point for the "unmaptest": timed malloc/free/bt-entry churn. */
+void check_bounds_table_frees(void)
+{
+       printf("executing unmaptest\n");
+       inspect_me(bounds_dir_ptr);
+       run_timed_test(&do_one_malloc);
+       printf("done with malloc() fun\n");
+}
+
+/*
+ * Report a shadow/hardware mismatch and abort the test.  For test 5
+ * (bndldx) also dump the bounds-directory and bounds-table entries for
+ * 'buf'; the while/break construct is just a one-shot block that can
+ * bail out early when the bd entry is invalid.
+ */
+void insn_test_failed(int test_nr, int test_round, void *buf,
+               void *buf_shadow, void *ptr)
+{
+       print_context(xsave_test_buf);
+       eprintf("ERROR: test %d round %d failed\n", test_nr, test_round);
+       while (test_nr == 5) {
+               struct mpx_bt_entry *bte;
+               struct mpx_bounds_dir *bd = (void *)bounds_dir_ptr;
+               struct mpx_bd_entry *bde = mpx_vaddr_to_bd_entry(buf, bd);
+
+               printf("  bd: %p\n", bd);
+               printf("&bde: %p\n", bde);
+               printf("*bde: %lx\n", *(unsigned long *)bde);
+               if (!bd_entry_valid(bde))
+                       break;
+
+               bte = mpx_vaddr_to_bt_entry(buf, bd);
+               printf(" te: %p\n", bte);
+               printf("bte[0]: %lx\n", bte->contents[0]);
+               printf("bte[1]: %lx\n", bte->contents[1]);
+               printf("bte[2]: %lx\n", bte->contents[2]);
+               printf("bte[3]: %lx\n", bte->contents[3]);
+               break;
+       }
+       test_failed();
+}
+
+/*
+ * One pass of the instruction/table consistency test: run each helper
+ * against (mostly stable) random pointers, then compare the xsaved
+ * hardware MPX state with the software shadow.  Exits the process with
+ * a distinct code on any mismatch or bookkeeping inconsistency.
+ *
+ * Fixes: check the malloc() result before memset()ing it, and correct
+ * the "succeded" typo in the error message.
+ */
+void check_mpx_insns_and_tables(void)
+{
+       int successes = 0;
+       int failures  = 0;
+       int buf_size = (1024*1024);
+       unsigned long *buf = malloc(buf_size);
+       const int total_nr_tests = NR_MPX_TEST_FUNCTIONS * TEST_ROUNDS;
+       int i, j;
+
+       if (!buf) {
+               eprintf("ERROR: malloc(%d) failed\n", buf_size);
+               exit(20);
+       }
+       memset(buf, 0, buf_size);
+       memset(buf_shadow, 0, sizeof(buf_shadow));
+
+       for (i = 0; i < TEST_ROUNDS; i++) {
+               uint8_t *ptr = get_random_addr() + 8;
+
+               for (j = 0; j < NR_MPX_TEST_FUNCTIONS; j++) {
+                       /* debugging leftover: flip the 0 to run only test 5 */
+                       if (0 && j != 5) {
+                               successes++;
+                               continue;
+                       }
+                       dprintf2("starting test %d round %d\n", j, i);
+                       dprint_context(xsave_test_buf);
+                       /*
+                        * test5 loads an address from the bounds tables.
+                        * The load will only complete if 'ptr' matches
+                        * the load and the store, so with random addrs,
+                        * the odds of this are very small.  Make it
+                        * higher by only moving 'ptr' 1/10 times.
+                        */
+                       if (random() % 10 <= 0)
+                               ptr = get_random_addr() + 8;
+                       dprintf3("random ptr{%p}\n", ptr);
+                       dprint_context(xsave_test_buf);
+                       run_helpers(j, (void *)buf, (void *)buf_shadow, ptr);
+                       dprint_context(xsave_test_buf);
+                       if (!compare_context(xsave_test_buf)) {
+                               insn_test_failed(j, i, buf, buf_shadow, ptr);
+                               failures++;
+                               goto exit;
+                       }
+                       successes++;
+                       dprint_context(xsave_test_buf);
+                       dprintf2("finished test %d round %d\n", j, i);
+                       dprintf3("\n");
+                       dprint_context(xsave_test_buf);
+               }
+       }
+
+exit:
+       dprintf2("\nabout to free:\n");
+       free(buf);
+       dprintf1("successes: %d\n", successes);
+       dprintf1(" failures: %d\n", failures);
+       dprintf1("    tests: %d\n", total_nr_tests);
+       dprintf1(" expected: %jd #BRs\n", num_upper_brs + num_lower_brs);
+       dprintf1("      saw: %d #BRs\n", br_count);
+       if (failures) {
+               eprintf("ERROR: non-zero number of failures\n");
+               exit(20);
+       }
+       if (successes != total_nr_tests) {
+               eprintf("ERROR: succeeded fewer than number of tries (%d != %d)\n",
+                               successes, total_nr_tests);
+               exit(21);
+       }
+       if (num_upper_brs + num_lower_brs != br_count) {
+               eprintf("ERROR: unexpected number of #BRs: %jd %jd %d\n",
+                               num_upper_brs, num_lower_brs, br_count);
+               eprintf("successes: %d\n", successes);
+               eprintf(" failures: %d\n", failures);
+               eprintf("    tests: %d\n", total_nr_tests);
+               eprintf(" expected: %jd #BRs\n", num_upper_brs + num_lower_brs);
+               eprintf("      saw: %d #BRs\n", br_count);
+               exit(22);
+       }
+}
+
+/*
+ * This is supposed to SIGSEGV nicely once the kernel
+ * can no longer allocate vaddr space.
+ */
+void exhaust_vaddr_space(void)
+{
+       unsigned long ptr;
+       /* Try to make sure there is no room for a bounds table anywhere */
+       unsigned long skip = MPX_BOUNDS_TABLE_SIZE_BYTES - PAGE_SIZE;
+#ifdef __i386__
+       unsigned long max_vaddr = 0xf7788000UL;
+#else
+       unsigned long max_vaddr = 0x800000000000UL;
+#endif
+
+       dprintf1("%s() start\n", __func__);
+       /* do not start at 0, we aren't allowed to map there */
+       for (ptr = PAGE_SIZE; ptr < max_vaddr; ptr += skip) {
+               void *ptr_ret;
+               /* madvise() succeeding means the page is already mapped */
+               int ret = madvise((void *)ptr, PAGE_SIZE, MADV_NORMAL);
+
+               if (!ret) {
+                       dprintf1("madvise() %lx ret: %d\n", ptr, ret);
+                       continue;
+               }
+               ptr_ret = mmap((void *)ptr, PAGE_SIZE, PROT_READ|PROT_WRITE,
+                               MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+               if (ptr_ret != (void *)ptr) {
+                       perror("mmap");
+                       dprintf1("mmap(%lx) ret: %p\n", ptr, ptr_ret);
+                       break;
+               }
+               if (!(ptr & 0xffffff))
+                       dprintf1("mmap(%lx) ret: %p\n", ptr, ptr_ret);
+       }
+       /* now force bounds-table allocation over the exhausted space */
+       for (ptr = PAGE_SIZE; ptr < max_vaddr; ptr += skip) {
+               dprintf2("covering 0x%lx with bounds table entries\n", ptr);
+               cover_buf_with_bt_entries((void *)ptr, PAGE_SIZE);
+       }
+       dprintf1("%s() end\n", __func__);
+       printf("done with vaddr space fun\n");
+}
+
+/* Entry point for the "tabletest": timed instruction/table checks. */
+void mpx_table_test(void)
+{
+       printf("starting mpx bounds table test\n");
+       run_timed_test(check_mpx_insns_and_tables);
+       printf("done with mpx bounds table test\n");
+}
+
+/*
+ * Set up MPX, seed the PRNG deterministically, verify the initial
+ * hardware state matches the shadow, then run whichever sub-tests were
+ * named on the command line (all except vaddrexhaust by default).
+ */
+int main(int argc, char **argv)
+{
+       int unmaptest = 0;
+       int vaddrexhaust = 0;
+       int tabletest = 0;
+       int i;
+
+       check_mpx_support();
+       mpx_prepare();
+       srandom(11179);
+
+       bd_incore();
+       init();
+       bd_incore();
+
+       trace_me();
+
+       /* 0x1f: save x87/SSE/AVX/MPX state components */
+       xsave_state((void *)xsave_test_buf, 0x1f);
+       if (!compare_context(xsave_test_buf))
+               printf("Init failed\n");
+
+       for (i = 1; i < argc; i++) {
+               if (!strcmp(argv[i], "unmaptest"))
+                       unmaptest = 1;
+               if (!strcmp(argv[i], "vaddrexhaust"))
+                       vaddrexhaust = 1;
+               if (!strcmp(argv[i], "tabletest"))
+                       tabletest = 1;
+       }
+       /* no arguments: run the default set */
+       if (!(unmaptest || vaddrexhaust || tabletest)) {
+               unmaptest = 1;
+               /* vaddrexhaust = 1; */
+               tabletest = 1;
+       }
+       if (unmaptest)
+               check_bounds_table_frees();
+       if (tabletest)
+               mpx_table_test();
+       if (vaddrexhaust)
+               exhaust_vaddr_space();
+       printf("%s completed successfully\n", argv[0]);
+       exit(0);
+}
+
+#include "mpx-dig.c"
diff --git a/tools/testing/selftests/x86/mpx-mm.h b/tools/testing/selftests/x86/mpx-mm.h
new file mode 100644 (file)
index 0000000..af706a5
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _MPX_MM_H
+#define _MPX_MM_H
+
+#define PAGE_SIZE 4096
+#define MB (1UL<<20)
+
+extern long nr_incore(void *ptr, unsigned long size_bytes);
+
+#endif /* _MPX_MM_H */
diff --git a/tools/testing/selftests/x86/test_mremap_vdso.c b/tools/testing/selftests/x86/test_mremap_vdso.c
new file mode 100644 (file)
index 0000000..bf0d687
--- /dev/null
@@ -0,0 +1,111 @@
+/*
+ * 32-bit test to check vDSO mremap.
+ *
+ * Copyright (c) 2016 Dmitry Safonov
+ * Suggested-by: Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Can be built statically:
+ * gcc -Os -Wall -static -m32 test_mremap_vdso.c
+ */
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+
+#include <sys/mman.h>
+#include <sys/auxv.h>
+#include <sys/syscall.h>
+#include <sys/wait.h>
+
+#define PAGE_SIZE      4096
+
+/*
+ * Attempt to mremap() the vDSO (size bytes at vdso_addr) to a freshly
+ * reserved location.  Returns 0 on success (or when the destination
+ * could not even be reserved), -1 when the kernel rejected a partial
+ * move with EINVAL (caller should retry with a bigger size), and 1 on
+ * any other mremap() failure.
+ */
+static int try_to_remap(void *vdso_addr, unsigned long size)
+{
+       void *dest_addr, *new_addr;
+
+       /* Searching for memory location where to remap */
+       dest_addr = mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+       if (dest_addr == MAP_FAILED) {
+               printf("[WARN]\tmmap failed (%d): %m\n", errno);
+               return 0;
+       }
+
+       printf("[NOTE]\tMoving vDSO: [%p, %#lx] -> [%p, %#lx]\n",
+               vdso_addr, (unsigned long)vdso_addr + size,
+               dest_addr, (unsigned long)dest_addr + size);
+       fflush(stdout);
+
+       new_addr = mremap(vdso_addr, size, size,
+                       MREMAP_FIXED|MREMAP_MAYMOVE, dest_addr);
+       if ((unsigned long)new_addr == (unsigned long)-1) {
+               munmap(dest_addr, size);
+               if (errno == EINVAL) {
+                       printf("[NOTE]\tvDSO partial move failed, will try with bigger size\n");
+                       return -1; /* Retry with larger */
+               }
+               printf("[FAIL]\tmremap failed (%d): %m\n", errno);
+               return 1;
+       }
+
+       return 0;
+
+}
+
+/*
+ * Fork a child that moves its own vDSO with mremap(), growing the
+ * guessed size until the kernel accepts a whole-vma move.  The parent
+ * judges the result from the child's exit status: a crash means
+ * vDSO mremap() does not work on this kernel.
+ */
+int main(int argc, char **argv, char **envp)
+{
+       pid_t child;
+
+       child = fork();
+       if (child == -1) {
+               printf("[WARN]\tfailed to fork (%d): %m\n", errno);
+               return 1;
+       }
+
+       if (child == 0) {
+               unsigned long vdso_size = PAGE_SIZE;
+               unsigned long auxval;
+               int ret = -1;
+
+               auxval = getauxval(AT_SYSINFO_EHDR);
+               printf("\tAT_SYSINFO_EHDR is %#lx\n", auxval);
+               /*
+                * NOTE(review): auxval is unsigned long, so the
+                * "== -ENOENT" compare relies on implicit conversion of
+                * the negative constant -- confirm this matches the
+                * getauxval() error convention in use here.
+                */
+               if (!auxval || auxval == -ENOENT) {
+                       printf("[WARN]\tgetauxval failed\n");
+                       return 0;
+               }
+
+               /* Simpler than parsing ELF header */
+               while (ret < 0) {
+                       ret = try_to_remap((void *)auxval, vdso_size);
+                       vdso_size += PAGE_SIZE;
+               }
+
+               /* Glibc is likely to explode now - exit with raw syscall */
+               asm volatile ("int $0x80" : : "a" (__NR_exit), "b" (!!ret));
+       } else {
+               int status;
+
+               if (waitpid(child, &status, 0) != child ||
+                       !WIFEXITED(status)) {
+                       printf("[FAIL]\tmremap() of the vDSO does not work on this kernel!\n");
+                       return 1;
+               } else if (WEXITSTATUS(status) != 0) {
+                       printf("[FAIL]\tChild failed with %d\n",
+                                       WEXITSTATUS(status));
+                       return 1;
+               }
+               printf("[OK]\n");
+       }
+
+       return 0;
+}
index 7cf6e1769903976f400b94d3c0c3a8734002b51a..b9d34b37c017be2ea35b9083877965f8aa224060 100644 (file)
@@ -510,10 +510,11 @@ static void slab_stats(struct slabinfo *s)
                        s->alloc_node_mismatch, (s->alloc_node_mismatch * 100) / total);
        }
 
-       if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail)
+       if (s->cmpxchg_double_fail || s->cmpxchg_double_cpu_fail) {
                printf("\nCmpxchg_double Looping\n------------------------\n");
                printf("Locked Cmpxchg Double redos   %lu\nUnlocked Cmpxchg Double redos %lu\n",
                        s->cmpxchg_double_fail, s->cmpxchg_double_cpu_fail);
+       }
 }
 
 static void report(struct slabinfo *s)